prompt listlengths 1 1 | compression_prompt listlengths 1 1 | target stringlengths 1.03k 828k |
|---|---|---|
[
{
"content": "Here is some code:\n```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code gen... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information... | ```python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class LocalNetworkGateway(Resource):
    """Azure local network gateway resource model.

    Variables marked :ivar: are populated by the server and are ignored
    when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param local_network_address_space: Local network site address space.
    :type local_network_address_space:
     ~azure.mgmt.network.v2017_11_01.models.AddressSpace
    :param gateway_ip_address: IP address of local network gateway.
    :type gateway_ip_address: str
    :param bgp_settings: Local network gateway's BGP speaker settings.
    :type bgp_settings: ~azure.mgmt.network.v2017_11_01.models.BgpSettings
    :param resource_guid: The resource GUID property of the
     LocalNetworkGateway resource.
    :type resource_guid: str
    :ivar provisioning_state: The provisioning state of the
     LocalNetworkGateway resource. Possible values are: 'Updating',
     'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Read-only fields are stripped from outgoing requests by the serializer.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps Python attribute names onto wire-format JSON paths.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'local_network_address_space': {'key': 'properties.localNetworkAddressSpace', 'type': 'AddressSpace'},
        'gateway_ip_address': {'key': 'properties.gatewayIpAddress', 'type': 'str'},
        'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LocalNetworkGateway, self).__init__(**kwargs)
        # Optional request-side properties default to None when absent.
        for prop in ('local_network_address_space', 'gateway_ip_address',
                     'bgp_settings', 'resource_guid', 'etag'):
            setattr(self, prop, kwargs.get(prop))
        # Server-populated; never taken from caller kwargs.
        self.provisioning_state = None
``` |
[
{
"content": "```python\n# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2011-2012 Smile. All Rights Reserved\n#\n# This program is free software: you can redistribute it and/or mod... | [
{
"content": "<|memory_start|>```python\n# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2011-2012 Smile. All Rights Reserved\n#\n# This program is free software: you can redistribu... | ```python
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 Smile. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class smile_activity_profile(osv.osv):
    """Activity profile master data: a simple named record."""
    _name = 'smile.activity.profile'

    _columns = {
        # Short human-readable profile label.
        'name': fields.char('Name', size=32, required=True),
    }

# Legacy OpenERP pattern: instantiating the class registers the model.
smile_activity_profile()
``` |
[
{
"content": "Here is some code:\n```python\n#!/usr/bin/python\n# Copyright (c) 2012 The Native Client Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport optparse\nimport os.path\nimport shutil\nimport subprocess\nimpor... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n#!/usr/bin/python\n# Copyright (c) 2012 The Native Client Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport optparse\nimport os.path\nimport shutil\nimport s... | ```python
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os.path
import shutil
import subprocess
import stat
import sys
import time
import traceback
# Maps the buildbot architecture argument ('32', '64', 'arm', 'mips32') to
# the names gyp ('gyp_arch') and SCons ('scons_platform') use for the same
# target architecture.
ARCH_MAP = {
    '32': {
        'gyp_arch': 'ia32',
        'scons_platform': 'x86-32',
    },
    '64': {
        'gyp_arch': 'x64',
        'scons_platform': 'x86-64',
    },
    'arm': {
        'gyp_arch': 'arm',
        'scons_platform': 'arm',
    },
    'mips32': {
        'gyp_arch': 'mips32',
        'scons_platform': 'mips32',
    },
}
def RunningOnBuildbot():
  """Return True when a buildbot slave environment is detected.

  Detection is based solely on the BUILDBOT_SLAVE_TYPE environment
  variable being set (to any value, including the empty string).
  """
  return 'BUILDBOT_SLAVE_TYPE' in os.environ
def GetHostPlatform():
  """Classify sys.platform as 'linux', 'win', or 'mac'.

  Raises:
    Exception: when the platform string matches none of the known hosts.
  """
  host = sys.platform.lower()
  if host.startswith('linux'):
    return 'linux'
  if host in ('darwin', 'mac'):
    return 'mac'
  if host in ('win', 'win32', 'windows', 'cygwin'):
    return 'win'
  raise Exception('Can not determine the platform!')
def SetDefaultContextAttributes(context):
  """
  Set default values for the attributes needed by the SCons function, so that
  SCons can be run without needing ParseStandardCommandLine.
  """
  host = GetHostPlatform()
  defaults = {
      'platform': host,
      'mode': 'opt',
      'default_scons_mode': ['opt-host', 'nacl'],
      # Windows defaults to a 64-bit target, everything else to 32-bit.
      'default_scons_platform': 'x86-64' if host == 'win' else 'x86-32',
      'android': False,
      'clang': False,
      'asan': False,
      'pnacl': False,
      'use_glibc': False,
      'use_breakpad_tools': False,
      'max_jobs': 8,
      'scons_args': [],
  }
  for key, value in defaults.items():
    context[key] = value
# Windows-specific environment manipulation
def SetupWindowsEnvironment(context):
  """Locate MSVC, add its tool directories to PATH, and run gyp setup.

  Mutates the context's environment (PATH, GYP_MSVS_VERSION, the
  VS*COMNTOOLS variable, SCONS_MSCOMMON_DEBUG) and stores the MSVC root
  in context['msvc'].

  Raises:
    Exception: when neither Program Files directory nor any known MSVC
      install can be found.
  """
  # Poke around looking for MSVC. We should do something more principled in
  # the future.

  # The name of Program Files can differ, depending on the bittage of Windows.
  program_files = r'c:\Program Files (x86)'
  if not os.path.exists(program_files):
    program_files = r'c:\Program Files'
    if not os.path.exists(program_files):
      raise Exception('Cannot find the Program Files directory!')

  # The location of MSVC can differ depending on the version.
  # Newest versions are tried first.
  msvc_locs = [
      ('Microsoft Visual Studio 12.0', 'VS120COMNTOOLS', '2013'),
      ('Microsoft Visual Studio 10.0', 'VS100COMNTOOLS', '2010'),
      ('Microsoft Visual Studio 9.0', 'VS90COMNTOOLS', '2008'),
      ('Microsoft Visual Studio 8.0', 'VS80COMNTOOLS', '2005'),
  ]
  for dirname, comntools_var, gyp_msvs_version in msvc_locs:
    msvc = os.path.join(program_files, dirname)
    # GYP_MSVS_VERSION is (re)set every iteration; when the loop breaks it
    # holds the version matching the install that was found.
    context.SetEnv('GYP_MSVS_VERSION', gyp_msvs_version)
    if os.path.exists(msvc):
      break
  else:
    # The break statement did not execute.
    raise Exception('Cannot find MSVC!')

  # Put MSVC in the path.
  vc = os.path.join(msvc, 'VC')
  comntools = os.path.join(msvc, 'Common7', 'Tools')
  perf = os.path.join(msvc, 'Team Tools', 'Performance Tools')
  context.SetEnv('PATH', os.pathsep.join([
      context.GetEnv('PATH'),
      vc,
      comntools,
      perf]))

  # SCons needs this variable to find vsvars.bat.
  # The end slash is needed because the batch files expect it.
  context.SetEnv(comntools_var, comntools + '\\')

  # This environment variable will SCons to print debug info while it searches
  # for MSVC.
  context.SetEnv('SCONS_MSCOMMON_DEBUG', '-')

  # Needed for finding devenv.
  context['msvc'] = msvc

  SetupGyp(context, [])
def SetupGyp(context, extra_vars=None):
  """Compose and export GYP_DEFINES from context vars, goma opts and extras.

  Args:
    context: build context; supplies context['gyp_vars'] and SetEnv.
    extra_vars: optional list of additional 'key=value' gyp defines.
      Defaults to no extras.
  """
  # A fresh list per call avoids the shared-mutable-default pitfall of the
  # old `extra_vars=[]` signature; behavior is unchanged for all callers.
  if extra_vars is None:
    extra_vars = []
  if RunningOnBuildbot():
    goma_opts = [
        'use_goma=1',
        'gomadir=/b/build/goma',
    ]
  else:
    goma_opts = []
  context.SetEnv('GYP_DEFINES', ' '.join(
      context['gyp_vars'] + goma_opts + extra_vars))
def SetupLinuxEnvironment(context):
  """Run gyp setup on Linux, syncing the trusted MIPS toolchain if needed."""
  if context['arch'] == 'mips32':
    # Ensure the trusted mips toolchain is installed before the build.
    sync_cmd = ['build/package_version/package_version.py', '--packages',
                'linux_x86/mips_trusted', 'sync', '-x']
    Command(context, sync_cmd)
  SetupGyp(context, ['target_arch=' + context['gyp_arch']])
def SetupMacEnvironment(context):
  """Run gyp setup on Mac with the context's target architecture."""
  target_arch = 'target_arch=' + context['gyp_arch']
  SetupGyp(context, [target_arch])
def SetupAndroidEnvironment(context):
  """Run gyp setup for an Android cross-compile build."""
  gyp_defines = ['OS=android', 'target_arch=' + context['gyp_arch']]
  SetupGyp(context, gyp_defines)
  context.SetEnv('GYP_CROSSCOMPILE', '1')
def ParseStandardCommandLine(context):
  """
  The standard buildbot scripts require 3 arguments to run.  The first
  argument (dbg/opt) controls if the build is a debug or a release build.  The
  second argument (32/64) controls the machine architecture being targeted.
  The third argument (newlib/glibc) controls which c library we're using for
  the nexes.  Different buildbots may have different sets of arguments.

  Populates the context with both the raw options and derived values
  (gyp/gn mode strings, scons platform, gyp_vars) and prints the final
  configuration, sorted by key.
  """

  parser = optparse.OptionParser()
  parser.add_option('-n', '--dry-run', dest='dry_run', default=False,
                    action='store_true', help='Do not execute any commands.')
  parser.add_option('--inside-toolchain', dest='inside_toolchain',
                    default=bool(os.environ.get('INSIDE_TOOLCHAIN')),
                    action='store_true', help='Inside toolchain build.')
  parser.add_option('--android', dest='android', default=False,
                    action='store_true', help='Build for Android.')
  parser.add_option('--clang', dest='clang', default=False,
                    action='store_true', help='Build trusted code with Clang.')
  parser.add_option('--coverage', dest='coverage', default=False,
                    action='store_true',
                    help='Build and test for code coverage.')
  parser.add_option('--validator', dest='validator', default=False,
                    action='store_true',
                    help='Only run validator regression test')
  parser.add_option('--asan', dest='asan', default=False,
                    action='store_true', help='Build trusted code with ASan.')
  parser.add_option('--scons-args', dest='scons_args', default=[],
                    action='append', help='Extra scons arguments.')
  parser.add_option('--step-suffix', metavar='SUFFIX', default='',
                    help='Append SUFFIX to buildbot step names.')
  parser.add_option('--no-gyp', dest='no_gyp', default=False,
                    action='store_true', help='Do not run the gyp build')
  parser.add_option('--no-goma', dest='no_goma', default=False,
                    action='store_true', help='Do not run with goma')
  parser.add_option('--use-breakpad-tools', dest='use_breakpad_tools',
                    default=False, action='store_true',
                    help='Use breakpad tools for testing')
  parser.add_option('--skip-build', dest='skip_build', default=False,
                    action='store_true',
                    help='Skip building steps in buildbot_pnacl')
  parser.add_option('--skip-run', dest='skip_run', default=False,
                    action='store_true',
                    help='Skip test-running steps in buildbot_pnacl')

  options, args = parser.parse_args()

  # Exactly three positional arguments: mode arch toolchain.
  if len(args) != 3:
    parser.error('Expected 3 arguments: mode arch toolchain')

  mode, arch, toolchain = args
  if mode not in ('dbg', 'opt', 'coverage'):
    parser.error('Invalid mode %r' % mode)

  if arch not in ARCH_MAP:
    parser.error('Invalid arch %r' % arch)

  if toolchain not in ('newlib', 'glibc', 'pnacl', 'nacl_clang'):
    parser.error('Invalid toolchain %r' % toolchain)

  # TODO(ncbray) allow a command-line override
  platform = GetHostPlatform()

  context['platform'] = platform
  context['mode'] = mode
  context['arch'] = arch
  context['android'] = options.android
  # ASan is Clang, so set the flag to simplify other checks.
  context['clang'] = options.clang or options.asan
  context['validator'] = options.validator
  context['asan'] = options.asan

  # TODO(ncbray) turn derived values into methods.
  context['gyp_mode'] = {
      'opt': 'Release',
      'dbg': 'Debug',
      'coverage': 'Debug'}[mode]
  context['gn_is_debug'] = {
      'opt': 'false',
      'dbg': 'true',
      'coverage': 'true'}[mode]
  context['gyp_arch'] = ARCH_MAP[arch]['gyp_arch']
  context['gyp_vars'] = []
  if context['clang']:
    context['gyp_vars'].append('clang=1')
  if context['asan']:
    context['gyp_vars'].append('asan=1')
  context['default_scons_platform'] = ARCH_MAP[arch]['scons_platform']
  context['default_scons_mode'] = ['nacl']
  # Only Linux can build trusted code on ARM.
  # TODO(mcgrathr): clean this up somehow
  if arch != 'arm' or platform == 'linux':
    context['default_scons_mode'] += [mode + '-host']
  context['use_glibc'] = toolchain == 'glibc'
  context['pnacl'] = toolchain == 'pnacl'
  context['nacl_clang'] = toolchain == 'nacl_clang'
  context['max_jobs'] = 8
  context['dry_run'] = options.dry_run
  context['inside_toolchain'] = options.inside_toolchain
  context['step_suffix'] = options.step_suffix
  context['no_gyp'] = options.no_gyp
  context['no_goma'] = options.no_goma
  context['coverage'] = options.coverage
  context['use_breakpad_tools'] = options.use_breakpad_tools
  context['scons_args'] = options.scons_args
  context['skip_build'] = options.skip_build
  context['skip_run'] = options.skip_run
  # Don't run gyp on coverage builds.
  if context['coverage']:
    context['no_gyp'] = True

  # Echo the final configuration for the buildbot log.
  for key, value in sorted(context.config.items()):
    print '%s=%s' % (key, value)
def EnsureDirectoryExists(path):
  """
  Create a directory if it does not already exist.
  Does not mask failures, but there really shouldn't be any.
  """
  # Attempt the creation unconditionally instead of the old
  # exists()-then-makedirs() dance, which raced with concurrent creators
  # (the directory could appear between the check and the call).
  try:
    os.makedirs(path)
  except OSError:
    # Re-raise for any failure other than the directory already existing.
    if not os.path.isdir(path):
      raise
def TryToCleanContents(path, file_name_filter=lambda fn: True):
  """
  Best-effort removal of everything inside *path*, leaving *path* itself.
  Entries rejected by file_name_filter are kept.  Ignores all failures.
  """
  if not os.path.exists(path):
    return
  for entry in os.listdir(path):
    TryToCleanPath(os.path.join(path, entry), file_name_filter)
def TryToCleanPath(path, file_name_filter=lambda fn: True):
  """
  Removes a file or directory.
  Ignores all failures.

  file_name_filter receives the full path; returning False skips removal.
  """
  if os.path.exists(path):
    if file_name_filter(path):
      print 'Trying to remove %s' % path
      try:
        RemovePath(path)
      except Exception:
        # Best-effort by design: log and carry on.
        print 'Failed to remove %s' % path
    else:
      print 'Skipping %s' % path
def Retry(op, *args):
  """Invoke op(*args); on Windows retry with exponential backoff.

  On non-Windows hosts the operation runs exactly once and any exception
  propagates.  On Windows up to 5 attempts are made (sleeping 2**attempt
  seconds between them) before the final exception is allowed through.
  """
  # Windows seems to be prone to having commands that delete files or
  # directories fail.  We currently do not have a complete understanding why,
  # and as a workaround we simply retry the command a few times.
  # It appears that file locks are hanging around longer than they should.  This
  # may be a secondary effect of processes hanging around longer than they
  # should.  This may be because when we kill a browser sel_ldr does not exit
  # immediately, etc.
  # Virus checkers can also accidently prevent files from being deleted, but
  # that shouldn't be a problem on the bots.
  if GetHostPlatform() == 'win':
    count = 0
    while True:
      try:
        op(*args)
        break
      except Exception:
        print "FAILED: %s %s" % (op.__name__, repr(args))
        count += 1
        if count < 5:
          print "RETRY: %s %s" % (op.__name__, repr(args))
          time.sleep(pow(2, count))
        else:
          # Don't mask the exception.
          raise
  else:
    op(*args)
def PermissionsFixOnError(func, path, exc_info):
  """shutil.rmtree onerror hook: retry after granting owner write access.

  When the failure looks permission-related (path not writable), chmod the
  path user-writable and re-invoke the failed operation; otherwise re-raise
  the active exception.
  """
  if os.access(path, os.W_OK):
    raise
  os.chmod(path, stat.S_IWUSR)
  func(path)
def _RemoveDirectory(path):
  """Remove a directory tree, fixing up read-only entries along the way."""
  print 'Removing %s' % path
  if os.path.exists(path):
    # PermissionsFixOnError chmods unwritable entries and retries.
    shutil.rmtree(path, onerror=PermissionsFixOnError)
    print '    Succeeded.'
  else:
    print '    Path does not exist, nothing to do.'
def RemoveDirectory(path):
  """
  Remove a directory if it exists.
  Does not mask failures, although it does retry a few times on Windows
  (via Retry, which wraps the actual _RemoveDirectory worker).
  """
  Retry(_RemoveDirectory, path)
def RemovePath(path):
  """Remove a path, file or directory."""
  if os.path.isdir(path):
    RemoveDirectory(path)
    return
  # Plain file (or dangling link): make it writable first when necessary,
  # since os.remove of a read-only file fails on Windows.
  read_only_file = os.path.isfile(path) and not os.access(path, os.W_OK)
  if read_only_file:
    os.chmod(path, stat.S_IWUSR)
  os.remove(path)
# This is a sanity check so Command can print out better error information.
def FileCanBeFound(name, paths):
  """Return True when *name* resolves to an existing file.

  Mirrors the shell's lookup rules: a name that exists relative to the
  current directory always wins; a name containing a directory component
  is never searched in *paths*; otherwise each entry of the os.pathsep
  separated *paths* string is probed in order.
  """
  # Current working directory.
  if os.path.exists(name):
    return True
  # Paths with directories are not resolved using the PATH variable.
  if os.path.dirname(name):
    return False
  # Probe each PATH entry.
  for directory in paths.split(os.pathsep):
    if os.path.exists(os.path.join(directory, name)):
      return True
  return False
def RemoveGypBuildDirectories():
  """Delete gyp output directories for every platform.

  Removing all of them unconditionally is overkill, but it allows for
  straight-line code; RemoveDirectory is a no-op for absent paths.
  """
  gyp_output_dirs = (
      # Windows.
      'build/Debug',
      'build/Release',
      'build/Debug-Win32',
      'build/Release-Win32',
      'build/Debug-x64',
      'build/Release-x64',
      # Linux and Mac.
      '../xcodebuild',
      '../out',
      'src/third_party/nacl_sdk/arm-newlib',
  )
  for build_dir in gyp_output_dirs:
    RemoveDirectory(build_dir)
def RemoveSconsBuildDirectories():
  """Delete the SCons (and breakpad) output directories."""
  for out_dir in ('scons-out', 'breakpad-out'):
    RemoveDirectory(out_dir)
# Execute a command using Python's subprocess module.
def Command(context, cmd, cwd=None):
  """Run *cmd* (an argv list) under the context's environment.

  Returns the child's exit code on success; raises StepFailed when the
  exit code is non-zero or the executable cannot be located.
  """
  print 'Running command: %s' % ' '.join(cmd)

  # Python's subprocess has a quirk.  A subprocess can execute with an
  # arbitrary, user-defined environment.  The first argument of the command,
  # however, is located using the PATH variable of the Python script that is
  # launching the subprocess.  Modifying the PATH in the environment passed to
  # the subprocess does not affect Python's search for the first argument of
  # the command (the executable file.)  This is a little counter intuitive,
  # so we're forcing the search to use the same PATH variable as is seen by
  # the subprocess.
  env = context.MakeCommandEnv()
  script_path = os.environ['PATH']
  os.environ['PATH'] = env['PATH']

  try:
    if FileCanBeFound(cmd[0], env['PATH']) or context['dry_run']:
      # Make sure that print statements before the subprocess call have been
      # flushed, otherwise the output of the subprocess call may appear before
      # the print statements.
      sys.stdout.flush()
      if context['dry_run']:
        retcode = 0
      else:
        retcode = subprocess.call(cmd, cwd=cwd, env=env)
    else:
      # Provide a nicer failure message.
      # If subprocess cannot find the executable, it will throw a cryptic
      # exception.
      print 'Executable %r cannot be found.' % cmd[0]
      retcode = 1
  finally:
    # Always restore the parent's PATH, even on exceptions.
    os.environ['PATH'] = script_path

  print 'Command return code: %d' % retcode
  if retcode != 0:
    raise StepFailed()
  return retcode
# A specialized version of CommandStep.
def SCons(context, mode=None, platform=None, parallel=False, browser_test=False,
          args=(), cwd=None):
  """Build the scons.py command line from the context and run it.

  mode/platform default to the context's configured values; parallel
  enables -j with the context's max_jobs.
  """
  if mode is None:
    mode = context['default_scons_mode']
  if platform is None:
    platform = context['default_scons_platform']
  jobs = context['max_jobs'] if parallel else 1

  cmd = []
  if browser_test and context.Linux():
    # Although we could use the "browser_headless=1" Scons option, it runs
    # xvfb-run once per Chromium invocation.  This is good for isolating
    # the tests, but xvfb-run has a stupid fixed-period sleep, which would
    # slow down the tests unnecessarily.
    cmd += ['xvfb-run', '--auto-servernum']
  cmd += [
      sys.executable, 'scons.py',
      '--verbose',
      '-k',
      '-j%d' % jobs,
      '--mode=' + ','.join(mode),
      'platform=' + platform,
  ]
  cmd += context['scons_args']
  if context['clang']:
    cmd.append('--clang')
  if context['asan']:
    cmd.append('--asan')
  if context['use_glibc']:
    cmd.append('--nacl_glibc')
  if context['pnacl']:
    cmd.append('bitcode=1')
  if context['nacl_clang']:
    cmd.append('nacl_clang=1')
  if context['use_breakpad_tools']:
    cmd.append('breakpad_tools_dir=breakpad-out')
  if context['android']:
    cmd.append('android=1')
  # Append user-specified arguments last.
  cmd += list(args)
  Command(context, cmd, cwd)
class StepFailed(Exception):
  """
  Thrown when the step has failed (non-zero exit code or missing
  executable); caught by Step.__exit__ to mark the step as failed.
  """
class StopBuild(Exception):
  """
  Thrown when the entire build should stop.  This does not indicate a failure,
  in of itself; RunBuild catches it to end the build cleanly.
  """
class Step(object):
  """
  This class is used in conjunction with a Python "with" statement to ensure
  that the preamble and postamble of each build step gets printed and failures
  get logged.  This class also ensures that exceptions thrown inside a "with"
  statement don't take down the entire build.
  """

  def __init__(self, name, status, halt_on_fail=True):
    """name: step label; status: a BuildStatus; halt_on_fail: raise
    StopBuild (abort the whole build) when this step fails."""
    self.status = status

    if 'step_suffix' in status.context:
      suffix = status.context['step_suffix']
    else:
      suffix = ''
    self.name = name + suffix
    self.halt_on_fail = halt_on_fail
    self.step_failed = False

  # Called on entry to a 'with' block.
  def __enter__(self):
    sys.stdout.flush()
    print
    # Buildbot annotator tag that opens a new step in the waterfall UI.
    print '@@@BUILD_STEP %s@@@' % self.name
    self.status.ReportBegin(self.name)

  # The method is called on exit from a 'with' block - even for non-local
  # control flow, i.e. exceptions, breaks, continues, returns, etc.
  # If an exception is thrown inside a block wrapped with a 'with' statement,
  # the __exit__ handler can suppress the exception by returning True.  This is
  # used to isolate each step in the build - if an exception occurs in a given
  # step, the step is treated as a failure.  This allows the postamble for each
  # step to be printed and also allows the build to continue of the failure of
  # a given step doesn't halt the build.
  def __exit__(self, type, exception, trace):
    sys.stdout.flush()
    if exception is None:
      # If exception is None, no exception occurred.
      step_failed = False
    elif isinstance(exception, StepFailed):
      # Expected failure path: Command already printed the details.
      step_failed = True
      print
      print 'Halting build step because of failure.'
      print
    else:
      # Unexpected exception: dump the traceback so it lands in the log.
      step_failed = True
      print
      print 'The build step threw an exception...'
      print
      traceback.print_exception(type, exception, trace, file=sys.stdout)
      print

    if step_failed:
      self.status.ReportFail(self.name)
      print '@@@STEP_FAILURE@@@'
      if self.halt_on_fail:
        print
        print 'Entire build halted because %s failed.' % self.name
        sys.stdout.flush()
        raise StopBuild()
    else:
      self.status.ReportPass(self.name)

    sys.stdout.flush()
    # Suppress any exception that occurred.
    return True
# Adds an arbitrary link inside the build stage on the waterfall.
def StepLink(text, link):
  """Emit the annotator tag that attaches a hyperlink to the current step."""
  print '@@@STEP_LINK@%s@%s@@@' % (text, link)
# Adds arbitrary text inside the build stage on the waterfall.
def StepText(text):
  """Emit the annotator tag that attaches free-form text to the current step."""
  print '@@@STEP_TEXT@%s@@@' % (text)
class BuildStatus(object):
  """
  Keeps track of the overall status of the build: an ordered list of
  (step name, 'passed'/'failed') pairs plus an ever_failed flag.
  """

  def __init__(self, context):
    self.context = context
    self.ever_failed = False
    self.steps = []

  def ReportBegin(self, name):
    """Hook called when a step starts; currently a no-op."""
    pass

  def ReportPass(self, name):
    """Record a successful step."""
    self.steps.append((name, 'passed'))

  def ReportFail(self, name):
    """Record a failed step and latch the overall failure flag."""
    self.steps.append((name, 'failed'))
    self.ever_failed = True

  # Handy info when this script is run outside of the buildbot.
  def DisplayBuildStatus(self):
    """Print a per-step pass/fail table and an overall verdict."""
    print
    for step, status in self.steps:
      print '%-40s[%s]' % (step, status)
    print

    if self.ever_failed:
      print 'Build failed.'
    else:
      print 'Build succeeded.'

  def ReturnValue(self):
    """Process exit code: 0 on full success, 1 if any step failed."""
    return int(self.ever_failed)
class BuildContext(object):
  """
  Encapsulates the information needed for running a build command.  This
  includes environment variables and default arguments for SCons invocations.

  Behaves like a dictionary for configuration values (subscripting and
  membership test) and separately tracks environment overrides.
  """
  # Only allow these attributes on objects of this type.
  __slots__ = ['status', 'global_env', 'config']

  def __init__(self):
    # The contents of global_env override os.environ for any commands run via
    # self.Command(...)
    self.global_env = {}
    # PATH is a special case.  See: Command.
    self.global_env['PATH'] = os.environ.get('PATH', '')

    self.config = {}
    self['dry_run'] = False

  # --- dictionary-style access to the configuration -----------------------
  def __getitem__(self, key):
    return self.config[key]

  def __setitem__(self, key, value):
    self.config[key] = value

  def __contains__(self, key):
    return key in self.config

  # --- host platform predicates -------------------------------------------
  def Windows(self):
    return self.config['platform'] == 'win'

  def Linux(self):
    return self.config['platform'] == 'linux'

  def Mac(self):
    return self.config['platform'] == 'mac'

  # --- environment overrides ----------------------------------------------
  def GetEnv(self, name, default=None):
    return self.global_env.get(name, default)

  def SetEnv(self, name, value):
    # Environment values must be strings.
    self.global_env[name] = str(value)

  def MakeCommandEnv(self):
    # The external environment is not sanitized.
    merged = dict(os.environ)
    # Arbitrary variables can be overridden.
    merged.update(self.global_env)
    return merged
def RunBuild(script, status):
  """Run script(status, context), print a summary step, and exit.

  Exits the process via sys.exit with 0 on success, 1 if any step failed.
  """
  try:
    script(status, status.context)
  except StopBuild:
    # A failing halt_on_fail step ends the build early; not itself an error.
    pass

  # Emit a summary step for three reasons:
  # - The annotator will attribute non-zero exit status to the last build step.
  #   This can misattribute failures to the last build step.
  # - runtest.py wraps the builds to scrape perf data. It emits an annotator
  #   tag on exit which misattributes perf results to the last build step.
  # - Provide a label step in which to show summary result.
  #   Otherwise these go back to the preamble.
  with Step('summary', status):
    if status.ever_failed:
      print 'There were failed stages.'
    else:
      print 'Success.'
    # Display a summary of the build.
    status.DisplayBuildStatus()

  sys.exit(status.ReturnValue())
``` |
[
{
"content": "Replicate the source code:\n```python\nfrom enigma import getPrevAsciiCode\nfrom Tools.NumericalTextInput import NumericalTextInput\nfrom Tools.Directories import resolveFilename, SCOPE_CONFIG, fileExists\nfrom Components.Harddisk import harddiskmanager\nfrom copy import copy as copy_copy\nfrom os... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nfrom enigma import getPrevAsciiCode\nfrom Tools.NumericalTextInput import NumericalTextInput\nfrom Tools.Directories import resolveFilename, SCOPE_CONFIG, fileExists\nfrom Components.Harddisk import harddiskmanager\nfrom copy import copy as co... | ```python
from enigma import getPrevAsciiCode
from Tools.NumericalTextInput import NumericalTextInput
from Tools.Directories import resolveFilename, SCOPE_CONFIG, fileExists
from Components.Harddisk import harddiskmanager
from copy import copy as copy_copy
from os import path as os_path
from time import localtime, strftime
# ConfigElement, the base class of all ConfigElements.
# it stores:
# value the current value, usefully encoded.
# usually a property which retrieves _value,
# and maybe does some reformatting
# _value the value as it's going to be saved in the configfile,
# though still in non-string form.
# this is the object which is actually worked on.
# default the initial value. If _value is equal to default,
# it will not be stored in the config file
# saved_value is a text representation of _value, stored in the config file
#
# and has (at least) the following methods:
# save() stores _value into saved_value,
# (or stores 'None' if it should not be stored)
# load() loads _value from saved_value, or loads
# the default if saved_value is 'None' (default)
# or invalid.
#
class ConfigElement(object):
    """Base class of all configuration elements.

    Subclasses are expected to provide a `default` attribute and, where
    str(value) round-tripping is not enough, override fromstring/tostring.
    Notifier callbacks registered via addNotifier are fired on value
    changes (immediate) or on leaving the element (final).
    """
    def __init__(self):
        # Per-notifier extra arguments, keyed by the notifier callable.
        self.extra_args = {}
        # Text form persisted in the config file; None means "use default".
        self.saved_value = None
        self.save_forced = False
        self.last_value = None
        self.save_disabled = False
        # Keyed by str(notifier); values are (func, value-at-registration,
        # call_on_save_and_cancel) tuples.  Name-mangled to keep subclasses
        # from clobbering them.
        self.__notifiers = { }
        self.__notifiers_final = { }
        self.enabled = True
        self.callNotifiersOnSaveAndCancel = False

    def getNotifiers(self):
        """Read-only list of the immediate-feedback notifier callables."""
        return [func for (func, val, call_on_save_and_cancel) in self.__notifiers.itervalues()]

    def setNotifiers(self, val):
        print "just readonly access to notifiers is allowed! append/remove doesnt work anymore! please use addNotifier, removeNotifier, clearNotifiers"
    notifiers = property(getNotifiers, setNotifiers)

    def getNotifiersFinal(self):
        """Read-only list of the on-leave (final) notifier callables."""
        return [func for (func, val, call_on_save_and_cancel) in self.__notifiers_final.itervalues()]

    def setNotifiersFinal(self, val):
        print "just readonly access to notifiers_final is allowed! append/remove doesnt work anymore! please use addNotifier, removeNotifier, clearNotifiers"
    notifiers_final = property(getNotifiersFinal, setNotifiersFinal)

    # you need to override this to do input validation
    def setValue(self, value):
        self._value = value
        self.changed()

    def getValue(self):
        return self._value
    value = property(getValue, setValue)

    # you need to override this if self.value is not a string
    def fromstring(self, value):
        return value

    # you can overide this for fancy default handling
    def load(self):
        """Set value from saved_value, falling back to the default."""
        sv = self.saved_value
        if sv is None:
            self.value = self.default
        else:
            self.value = self.fromstring(sv)

    def tostring(self, value):
        return str(value)

    # you need to override this if str(self.value) doesn't work
    def save(self):
        """Serialize value into saved_value (None when it equals default)."""
        if self.save_disabled or (self.value == self.default and not self.save_forced):
            self.saved_value = None
        else:
            self.saved_value = self.tostring(self.value)
        if self.callNotifiersOnSaveAndCancel:
            self.changed()

    def cancel(self):
        """Discard the in-memory value by re-loading from saved_value."""
        self.load()
        if self.callNotifiersOnSaveAndCancel:
            self.changed()

    def isChanged(self):
        """True when the current value differs from what would be saved."""
        sv = self.saved_value
        if sv is None and self.value == self.default:
            return False
        return self.tostring(self.value) != sv

    def changed(self):
        """Fire all immediate notifiers, passing extra_args when present."""
        if self.__notifiers:
            for x in self.notifiers:
                try:
                    if self.extra_args and self.extra_args[x]:
                        x(self, self.extra_args[x])
                    else:
                        x(self)
                except:
                    # Fall back to the single-argument call signature.
                    x(self)

    def changedFinal(self):
        """Fire all final (on-leave) notifiers, mirroring changed()."""
        if self.__notifiers_final:
            for x in self.notifiers_final:
                try:
                    if self.extra_args and self.extra_args[x]:
                        x(self, self.extra_args[x])
                    else:
                        x(self)
                except:
                    x(self)

    # immediate_feedback = True means call notifier on every value CHANGE
    # immediate_feedback = False means call notifier on leave the config element (up/down) when value have CHANGED
    # call_on_save_or_cancel = True means call notifier always on save/cancel.. even when value have not changed
    def addNotifier(self, notifier, initial_call = True, immediate_feedback = True, call_on_save_or_cancel = False, extra_args=None):
        if not extra_args: extra_args = []
        assert callable(notifier), "notifiers must be callable"
        try:
            self.extra_args[notifier] = extra_args
        except: pass
        if immediate_feedback:
            self.__notifiers[str(notifier)] = (notifier, self.value, call_on_save_or_cancel)
        else:
            self.__notifiers_final[str(notifier)] = (notifier, self.value, call_on_save_or_cancel)
        # CHECKME:
        # do we want to call the notifier
        # - at all when adding it? (yes, though optional)
        # - when the default is active? (yes)
        # - when no value *yet* has been set,
        #   because no config has ever been read (currently yes)
        #   (though that's not so easy to detect.
        #    the entry could just be new.)
        if initial_call:
            if extra_args:
                notifier(self,extra_args)
            else:
                notifier(self)

    def removeNotifier(self, notifier):
        """Drop a notifier from whichever registry holds it; silent if absent."""
        try:
            del self.__notifiers[str(notifier)]
        except:
            try:
                del self.__notifiers_final[str(notifier)]
            except:
                pass

    def clearNotifiers(self):
        self.__notifiers = { }
        self.__notifiers_final = { }

    def disableSave(self):
        self.save_disabled = True

    def __call__(self, selected):
        # Rendering hook: delegates to the subclass's getMulti.
        return self.getMulti(selected)

    def onSelect(self, session):
        pass

    def onDeselect(self, session):
        # Fire final notifiers only when the value changed while selected.
        if not self.last_value == self.value:
            self.changedFinal()
            self.last_value = self.value
# Input key codes understood by config element handleKey implementations.
KEY_LEFT = 0
KEY_RIGHT = 1
KEY_OK = 2
KEY_DELETE = 3
KEY_BACKSPACE = 4
KEY_HOME = 5
KEY_END = 6
KEY_TOGGLEOW = 7   # toggle overwrite/insert mode in text input
KEY_ASCII = 8
KEY_TIMEOUT = 9

# Digit keys occupy the contiguous code range 12..21.
KEY_NUMBERS = range(12, 12+10)
KEY_0 = 12
KEY_9 = 12+9
def getKeyNumber(key):
    """Return the digit (0-9) encoded by a KEY_0..KEY_9 key code."""
    assert key in KEY_NUMBERS
    return key - KEY_0
class choicesList(object): # XXX: we might want a better name for this
    """Uniform wrapper over a list- or dict-shaped set of choices.

    List entries may be plain values or (value, description) tuples; dict
    entries map value -> description.  Iteration and indexing always yield
    the *values*.  NOTE(review): indexing dict choices relies on Python 2
    `dict.keys()` returning an indexable list — not portable to Python 3.
    """
    LIST_TYPE_LIST = 1
    LIST_TYPE_DICT = 2

    def __init__(self, choices, type = None):
        self.choices = choices
        if type is None:
            # Infer the storage shape when not given explicitly.
            if isinstance(choices, list):
                self.type = choicesList.LIST_TYPE_LIST
            elif isinstance(choices, dict):
                self.type = choicesList.LIST_TYPE_DICT
            else:
                assert False, "choices must be dict or list!"
        else:
            self.type = type

    def __list__(self):
        # Values only; (value, description) tuples are unwrapped.
        if self.type == choicesList.LIST_TYPE_LIST:
            ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
        else:
            ret = self.choices.keys()
        # Never return an empty list: fall back to a single empty string.
        return ret or [""]

    def __iter__(self):
        if self.type == choicesList.LIST_TYPE_LIST:
            ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
        else:
            ret = self.choices
        return iter(ret or [""])

    def __len__(self):
        # Reported length is never zero (matches the [""] fallback above).
        return len(self.choices) or 1

    def updateItemDescription(self, index, descr):
        """Replace the description of the choice at *index*."""
        if self.type == choicesList.LIST_TYPE_LIST:
            orig = self.choices[index]
            assert isinstance(orig, tuple)
            self.choices[index] = (orig[0], descr)
        else:
            key = self.choices.keys()[index]
            self.choices[key] = descr

    def __getitem__(self, index):
        if self.type == choicesList.LIST_TYPE_LIST:
            ret = self.choices[index]
            if isinstance(ret, tuple):
                ret = ret[0]
            return ret
        return self.choices.keys()[index]

    def index(self, value):
        try:
            return self.__list__().index(value)
        except (ValueError, IndexError):
            # occurs e.g. when default is not in list
            return 0

    def __setitem__(self, index, value):
        """Rebind the value at *index*, keeping its description."""
        if self.type == choicesList.LIST_TYPE_LIST:
            orig = self.choices[index]
            if isinstance(orig, tuple):
                self.choices[index] = (value, orig[1])
            else:
                self.choices[index] = value
        else:
            key = self.choices.keys()[index]
            orig = self.choices[key]
            del self.choices[key]
            self.choices[value] = orig

    def default(self):
        """Return the first choice's value, or "" when there are none."""
        choices = self.choices
        if not choices:
            return ""
        if self.type is choicesList.LIST_TYPE_LIST:
            default = choices[0]
            if isinstance(default, tuple):
                default = default[0]
        else:
            default = choices.keys()[0]
        return default
class descriptionList(choicesList): # XXX: we might want a better name for this
	# choicesList view over the *descriptions* instead of the ids.
	def __list__(self):
		if self.type == choicesList.LIST_TYPE_LIST:
			ret = [not isinstance(x, tuple) and x or x[1] for x in self.choices]
		else:
			ret = self.choices.values()
		return ret or [""]
	def __iter__(self):
		return iter(self.__list__())
	def __getitem__(self, index):
		# NOTE: "index" here is the choice *id*, not a numeric position.
		if self.type == choicesList.LIST_TYPE_LIST:
			for x in self.choices:
				if isinstance(x, tuple):
					if x[0] == index:
						return str(x[1])
				elif x == index:
					return str(x)
			return str(index) # Fallback!
		else:
			return str(self.choices.get(index, ""))
	def __setitem__(self, index, value):
		# set the description belonging to choice id "index"
		if self.type == choicesList.LIST_TYPE_LIST:
			i = self.index(index)
			orig = self.choices[i]
			if isinstance(orig, tuple):
				self.choices[i] = (orig[0], value)
			else:
				self.choices[i] = value
		else:
			self.choices[index] = value
#
# ConfigSelection is a "one of.."-type.
# it has the "choices", usually a list, which contains
# (id, desc)-tuples (or just only the ids, in case the id
# will be used as description)
#
# all ids MUST be plain strings.
#
class ConfigSelection(ConfigElement):
	# A "one of ..." element: the value is always one of the ids held in
	# self.choices; self._descr caches the current value's description.
	def __init__(self, choices, default = None):
		ConfigElement.__init__(self)
		self.choices = choicesList(choices)
		if default is None:
			default = self.choices.default()
		self._descr = None
		self.default = self._value = self.last_value = default
	def setChoices(self, choices, default = None):
		# Swap in a new choice set; snap to default if the current value
		# is no longer part of it.
		self.choices = choicesList(choices)
		if default is None:
			default = self.choices.default()
		self.default = default
		if self.value not in self.choices:
			self.value = default
	def setValue(self, value):
		# Out-of-range assignments silently fall back to the default.
		if value in self.choices:
			self._value = value
		else:
			self._value = self.default
		self._descr = None
		self.changed()
	def tostring(self, val):
		return val
	def getValue(self):
		return self._value
	def setCurrentText(self, text):
		# Rename the currently selected choice id (and its description).
		i = self.choices.index(self.value)
		self.choices[i] = text
		self._descr = self.description[text] = text
		self._value = text
	value = property(getValue, setValue)
	def getIndex(self):
		return self.choices.index(self.value)
	index = property(getIndex)
	# GUI
	def handleKey(self, key):
		# LEFT/RIGHT cycle with wraparound, HOME/END jump to first/last.
		nchoices = len(self.choices)
		if nchoices > 1:
			i = self.choices.index(self.value)
			if key == KEY_LEFT:
				self.value = self.choices[(i + nchoices - 1) % nchoices]
			elif key == KEY_RIGHT:
				self.value = self.choices[(i + 1) % nchoices]
			elif key == KEY_HOME:
				self.value = self.choices[0]
			elif key == KEY_END:
				self.value = self.choices[nchoices - 1]
	def selectNext(self):
		nchoices = len(self.choices)
		i = self.choices.index(self.value)
		self.value = self.choices[(i + 1) % nchoices]
	def getText(self):
		# description of the current value, lazily cached
		if self._descr is None:
			self._descr = self.description[self.value]
		return self._descr
	def getMulti(self, selected):
		if self._descr is None:
			self._descr = self.description[self.value]
		return ("text", self._descr)
	# HTML
	def getHTML(self, id):
		# render the choices as a radio-button group
		res = ""
		for v in self.choices:
			descr = self.description[v]
			if self.value == v:
				checked = 'checked="checked" '
			else:
				checked = ''
			res += '<input type="radio" name="' + id + '" ' + checked + 'value="' + v + '">' + descr + "</input></br>\n"
		return res
	def unsafeAssign(self, value):
		# setValue does check if value is in choices. This is safe enough.
		self.value = value
	description = property(lambda self: descriptionList(self.choices.choices, self.choices.type))
# a binary decision.
#
# several customized versions exist for different
# descriptions.
#
class ConfigBoolean(ConfigElement):
	"""Two-state (boolean) element.

	descriptions maps False/True to the strings shown in the GUI; the
	ConfigYesNo/ConfigOnOff/ConfigEnableDisable subclasses differ only in
	those strings.
	"""
	def __init__(self, default = False, descriptions = None):
		ConfigElement.__init__(self)
		# descriptions used to be a mutable default argument shared by all
		# instances; build a fresh dict per instance instead.
		if descriptions is None:
			descriptions = {False: _("false"), True: _("true")}
		self.descriptions = descriptions
		self.value = self.last_value = self.default = default
	def handleKey(self, key):
		if key in (KEY_LEFT, KEY_RIGHT):
			self.value = not self.value
		elif key == KEY_HOME:
			self.value = False
		elif key == KEY_END:
			self.value = True
	def getText(self):
		return self.descriptions[self.value]
	def getMulti(self, selected):
		return ("text", self.descriptions[self.value])
	def tostring(self, value):
		return "true" if value else "false"
	def fromstring(self, val):
		return val == "true"
	def getHTML(self, id):
		checked = ' checked="checked"' if self.value else ''
		return '<input type="checkbox" name="' + id + '" value="1" ' + checked + " />"
	# this is FLAWED. and must be fixed: anything except "1" (including
	# "true") silently maps to False.
	def unsafeAssign(self, value):
		self.value = (value == "1")
	def onDeselect(self, session):
		# fire final notifiers once when focus leaves and the value changed
		if not self.last_value == self.value:
			self.changedFinal()
		self.last_value = self.value
class ConfigYesNo(ConfigBoolean):
	"""Boolean presented to the user as yes/no."""
	def __init__(self, default = False):
		ConfigBoolean.__init__(self, default = default, descriptions = {False: _("no"), True: _("yes")})
class ConfigOnOff(ConfigBoolean):
	"""Boolean presented to the user as on/off."""
	def __init__(self, default = False):
		ConfigBoolean.__init__(self, default = default, descriptions = {False: _("off"), True: _("on")})
class ConfigEnableDisable(ConfigBoolean):
	"""Boolean presented to the user as enable/disable."""
	def __init__(self, default = False):
		ConfigBoolean.__init__(self, default = default, descriptions = {False: _("disable"), True: _("enable")})
class ConfigDateTime(ConfigElement):
	"""A unix timestamp stepped in fixed increments (default one day) and
	rendered through strftime with the given format string."""
	def __init__(self, default, formatstring, increment = 86400):
		ConfigElement.__init__(self)
		self.increment = increment
		self.formatstring = formatstring
		self.value = self.last_value = self.default = int(default)
	def handleKey(self, key):
		if key == KEY_LEFT:
			self.value -= self.increment
		elif key == KEY_RIGHT:
			self.value += self.increment
		elif key in (KEY_HOME, KEY_END):
			# both ends reset to the default timestamp
			self.value = self.default
	def getText(self):
		return strftime(self.formatstring, localtime(self.value))
	def getMulti(self, selected):
		return "text", strftime(self.formatstring, localtime(self.value))
	def fromstring(self, val):
		return int(val)
# *THE* mighty config element class
#
# allows you to store/edit a sequence of values.
# can be used for IP-addresses, dates, plain integers, ...
# several helpers exist to ease this up a bit.
#
class ConfigSequence(ConfigElement):
	# A sequence of bounded integers (IP address, date, PIN, ...) edited
	# digit by digit; marked_pos is the cursor position over the digits.
	def __init__(self, seperator, limits, default, censor_char = ""):
		ConfigElement.__init__(self)
		assert isinstance(limits, list) and len(limits[0]) == 2, "limits must be [(min, max),...]-tuple-list"
		assert censor_char == "" or len(censor_char) == 1, "censor char must be a single char (or \"\")"
		#assert isinstance(default, list), "default must be a list"
		#assert isinstance(default[0], int), "list must contain numbers"
		#assert len(default) == len(limits), "length must match"
		self.marked_pos = 0
		self.seperator = seperator
		self.limits = limits
		self.censor_char = censor_char
		self.last_value = self.default = default
		self.value = copy_copy(default)
		self.endNotifier = None
	def validate(self):
		# Clamp every block into its (min, max) limits and keep the cursor
		# inside the editable range; fires the endNotifier callbacks when
		# the cursor moved past the last digit.
		max_pos = 0
		num = 0
		for i in self._value:
			max_pos += len(str(self.limits[num][1]))
			if self._value[num] < self.limits[num][0]:
				self._value[num] = self.limits[num][0]
			if self._value[num] > self.limits[num][1]:
				self._value[num] = self.limits[num][1]
			num += 1
		if self.marked_pos >= max_pos:
			if self.endNotifier:
				for x in self.endNotifier:
					x(self)
			self.marked_pos = max_pos - 1
		if self.marked_pos < 0:
			self.marked_pos = 0
	def validatePos(self):
		# Cursor-only variant of validate(): clamp marked_pos to the total
		# digit count without touching the values.
		if self.marked_pos < 0:
			self.marked_pos = 0
		total_len = sum([len(str(x[1])) for x in self.limits])
		if self.marked_pos >= total_len:
			self.marked_pos = total_len - 1
	def addEndNotifier(self, notifier):
		# notifier(self) is invoked when the cursor runs past the last digit.
		if self.endNotifier is None:
			self.endNotifier = []
		self.endNotifier.append(notifier)
	def handleKey(self, key):
		if key == KEY_LEFT:
			self.marked_pos -= 1
			self.validatePos()
		elif key == KEY_RIGHT:
			self.marked_pos += 1
			self.validatePos()
		elif key == KEY_HOME:
			self.marked_pos = 0
			self.validatePos()
		elif key == KEY_END:
			# jump to the last digit of the last block
			max_pos = 0
			num = 0
			for i in self._value:
				max_pos += len(str(self.limits[num][1]))
				num += 1
			self.marked_pos = max_pos - 1
			self.validatePos()
		elif key in KEY_NUMBERS or key == KEY_ASCII:
			if key == KEY_ASCII:
				code = getPrevAsciiCode()
				if code < 48 or code > 57:
					return
				number = code - 48
			else:
				number = getKeyNumber(key)
			block_len = [len(str(x[1])) for x in self.limits]
			total_len = sum(block_len)
			pos = 0
			blocknumber = 0
			block_len_total = [0, ]
			# locate the block containing the cursor
			for x in block_len:
				pos += block_len[blocknumber]
				block_len_total.append(pos)
				if pos - 1 >= self.marked_pos:
					pass
				else:
					blocknumber += 1
			# length of numberblock
			number_len = len(str(self.limits[blocknumber][1]))
			# position in the block
			posinblock = self.marked_pos - block_len_total[blocknumber]
			# replace the decimal digit at the cursor position
			oldvalue = abs(self._value[blocknumber]) # we are using abs in order to allow change negative values like default -1 on mis
			olddec = oldvalue % 10 ** (number_len - posinblock) - (oldvalue % 10 ** (number_len - posinblock - 1))
			newvalue = oldvalue - olddec + (10 ** (number_len - posinblock - 1) * number)
			self._value[blocknumber] = newvalue
			self.marked_pos += 1
			self.validate()
		self.changed()
	def genText(self):
		# Render the value string plus the translated cursor position;
		# censor_char (if set) masks the digits, e.g. for PIN entry.
		value = ""
		mPos = self.marked_pos
		num = 0
		for i in self._value:
			if value:	#fixme no heading separator possible
				value += self.seperator
				if mPos >= len(value) - 1:
					mPos += 1
			if self.censor_char == "":
				value += ("%0" + str(len(str(self.limits[num][1]))) + "d") % i
			else:
				value += (self.censor_char * len(str(self.limits[num][1])))
			num += 1
		return value, mPos
	def getText(self):
		(value, mPos) = self.genText()
		return value
	def getMulti(self, selected):
		(value, mPos) = self.genText()
		# only mark cursor when we are selected
		# (this code is heavily ink optimized!)
		if self.enabled:
			return "mtext"[1-selected:], value, [mPos]
		else:
			return "text", value
	def tostring(self, val):
		return self.seperator.join([self.saveSingle(x) for x in val])
	def saveSingle(self, v):
		return str(v)
	def fromstring(self, value):
		try:
			return [int(x) for x in value.split(self.seperator)]
		except:
			return self.default
	def onDeselect(self, session):
		if self.last_value != self._value:
			self.changedFinal()
			self.last_value = copy_copy(self._value)
ip_limits = [(0,255),(0,255),(0,255),(0,255)]
class ConfigIP(ConfigSequence):
	# IPv4 address editor; unlike plain ConfigSequence the cursor moves
	# per block (octet), not per digit.
	def __init__(self, default, auto_jump = False):
		ConfigSequence.__init__(self, seperator = ".", limits = ip_limits, default = default)
		self.block_len = [len(str(x[1])) for x in self.limits]
		self.marked_block = 0
		self.overwrite = True
		self.auto_jump = auto_jump
	def handleKey(self, key):
		if key == KEY_LEFT:
			if self.marked_block > 0:
				self.marked_block -= 1
			self.overwrite = True
		elif key == KEY_RIGHT:
			if self.marked_block < len(self.limits)-1:
				self.marked_block += 1
			self.overwrite = True
		elif key == KEY_HOME:
			self.marked_block = 0
			self.overwrite = True
		elif key == KEY_END:
			self.marked_block = len(self.limits)-1
			self.overwrite = True
		elif key in KEY_NUMBERS or key == KEY_ASCII:
			if key == KEY_ASCII:
				code = getPrevAsciiCode()
				if code < 48 or code > 57:
					return
				number = code - 48
			else:
				number = getKeyNumber(key)
			oldvalue = self._value[self.marked_block]
			if self.overwrite:
				# first digit after entering a block replaces it entirely
				self._value[self.marked_block] = number
				self.overwrite = False
			else:
				oldvalue *= 10
				newvalue = oldvalue + number
				if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
					# digit does not fit: move to the next block and
					# re-dispatch the same key there
					self.handleKey(KEY_RIGHT)
					self.handleKey(key)
					return
				else:
					self._value[self.marked_block] = newvalue
			if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
				self.handleKey(KEY_RIGHT)
			self.validate()
		self.changed()
	def genText(self):
		# value string plus the character positions of the marked block
		value = ""
		block_strlen = []
		for i in self._value:
			block_strlen.append(len(str(i)))
			if value:
				value += self.seperator
			value += str(i)
		leftPos = sum(block_strlen[:self.marked_block])+self.marked_block
		rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
		mBlock = range(leftPos, rightPos)
		return value, mBlock
	def getMulti(self, selected):
		(value, mBlock) = self.genText()
		if self.enabled:
			return "mtext"[1-selected:], value, mBlock
		else:
			return "text", value
	def getHTML(self, id):
		# we definitely don't want leading zeros
		return '.'.join(["%d" % d for d in self.value])
mac_limits = [(1,255),(1,255),(1,255),(1,255),(1,255),(1,255)]
class ConfigMAC(ConfigSequence):
	"""Six colon-separated bytes forming a MAC address."""
	def __init__(self, default):
		ConfigSequence.__init__(self, seperator = ":", limits = mac_limits, default = default)
class ConfigMacText(ConfigElement, NumericalTextInput):
	# MAC address entered as hex text ("AA:BB:CC:DD:EE:FF"); the colon
	# separators are skipped automatically while editing.
	def __init__(self, default = "", visible_width = False):
		ConfigElement.__init__(self)
		NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
		self.marked_pos = 0
		self.allmarked = (default != "")
		self.fixed_size = 17	# "xx:xx:xx:xx:xx:xx" is always 17 chars
		self.visible_width = visible_width
		self.offset = 0
		self.overwrite = 17	# NOTE(review): always truthy, so editing is always in overwrite mode — presumably intentional for the fixed layout; confirm
		self.help_window = None
		self.value = self.last_value = self.default = default
		self.useableChars = '0123456789ABCDEF'
	def validateMarker(self):
		# keep the cursor inside the text
		textlen = len(self.text)
		if self.marked_pos > textlen-1:
			self.marked_pos = textlen-1
		elif self.marked_pos < 0:
			self.marked_pos = 0
	def insertChar(self, ch, pos, owr):
		# hop over a separator, then overwrite or insert the hex digit
		if self.text[pos] == ':':
			pos += 1
		if owr or self.overwrite:
			self.text = self.text[0:pos] + ch + self.text[pos + 1:]
		elif self.fixed_size:
			self.text = self.text[0:pos] + ch + self.text[pos:-1]
		else:
			self.text = self.text[0:pos] + ch + self.text[pos:]
	def handleKey(self, key):
		if key == KEY_LEFT:
			self.timeout()
			if self.allmarked:
				self.marked_pos = len(self.text)
				self.allmarked = False
			else:
				# jump over the ':' separator in one key press
				if self.text[self.marked_pos-1] == ':':
					self.marked_pos -= 2
				else:
					self.marked_pos -= 1
		elif key == KEY_RIGHT:
			self.timeout()
			if self.allmarked:
				self.marked_pos = 0
				self.allmarked = False
			else:
				if self.marked_pos < (len(self.text)-1):
					if self.text[self.marked_pos+1] == ':':
						self.marked_pos += 2
					else:
						self.marked_pos += 1
		elif key in KEY_NUMBERS:
			# repeated presses of the same key cycle the character in place
			owr = self.lastKey == getKeyNumber(key)
			newChar = self.getKey(getKeyNumber(key))
			self.insertChar(newChar, self.marked_pos, owr)
		elif key == KEY_TIMEOUT:
			self.timeout()
			if self.help_window:
				self.help_window.update(self)
			if self.text[self.marked_pos] == ':':
				self.marked_pos += 1
			return
		if self.help_window:
			self.help_window.update(self)
		self.validateMarker()
		self.changed()
	def nextFunc(self):
		self.marked_pos += 1
		self.validateMarker()
		self.changed()
	def getValue(self):
		try:
			return self.text.encode("utf-8")
		except UnicodeDecodeError:
			print "Broken UTF8!"
			return self.text
	def setValue(self, val):
		try:
			self.text = val.decode("utf-8")
		except UnicodeDecodeError:
			self.text = val.decode("utf-8", "ignore")
			print "Broken UTF8!"
	value = property(getValue, setValue)
	_value = property(getValue, setValue)
	def getText(self):
		return self.text.encode("utf-8")
	def getMulti(self, selected):
		if self.visible_width:
			if self.allmarked:
				mark = range(0, min(self.visible_width, len(self.text)))
			else:
				mark = [self.marked_pos-self.offset]
			return "mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark
		else:
			if self.allmarked:
				mark = range(0, len(self.text))
			else:
				mark = [self.marked_pos]
			return "mtext"[1-selected:], self.text.encode("utf-8")+" ", mark
	def onSelect(self, session):
		self.allmarked = (self.value != "")
		if session is not None:
			from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
			self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
			self.help_window.setAnimationMode(0)
			self.help_window.show()
	def onDeselect(self, session):
		self.marked_pos = 0
		self.offset = 0
		if self.help_window:
			session.deleteDialog(self.help_window)
			self.help_window = None
		if not self.last_value == self.value:
			self.changedFinal()
			self.last_value = self.value
	def getHTML(self, id):
		return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
	def unsafeAssign(self, value):
		self.value = str(value)
class ConfigPosition(ConfigSequence):
	"""Screen position/size: four comma-separated values bounded by args."""
	def __init__(self, default, args):
		limits = [(0, args[0]), (0, args[1]), (0, args[2]), (0, args[3])]
		ConfigSequence.__init__(self, seperator = ",", limits = limits, default = default)
clock_limits = [(0,23),(0,59)]
class ConfigClock(ConfigSequence):
	"""A time of day (hour, minute) edited as HH:MM."""
	def __init__(self, default):
		t = localtime(default)
		ConfigSequence.__init__(self, seperator = ":", limits = clock_limits, default = [t.tm_hour, t.tm_min])
	def increment(self):
		# Advance one minute, wrapping 23:59 -> 00:00.
		total = (self._value[0] * 60 + self._value[1] + 1) % (24 * 60)
		self._value[0], self._value[1] = divmod(total, 60)
		# Trigger change
		self.changed()
	def decrement(self):
		# Go back one minute, wrapping 00:00 -> 23:59.
		total = (self._value[0] * 60 + self._value[1] - 1) % (24 * 60)
		self._value[0], self._value[1] = divmod(total, 60)
		# Trigger change
		self.changed()
integer_limits = (0, 9999999999)
class ConfigInteger(ConfigSequence):
	"""A single bounded integer stored as a one-element ConfigSequence."""
	def __init__(self, default, limits = integer_limits):
		ConfigSequence.__init__(self, seperator = ":", limits = [limits], default = default)
	# you need to override this to do input validation
	def setValue(self, value):
		self._value = [value]
		self.changed()
	def getValue(self):
		return self._value[0]
	value = property(getValue, setValue)
	def fromstring(self, value):
		return int(value)
	def tostring(self, value):
		return str(value)
class ConfigPIN(ConfigInteger):
	# A numeric PIN of fixed length; censor hides the digits on screen.
	# NOTE: the "len" parameter shadows the builtin but is part of the
	# public keyword interface, so it must keep its name.
	def __init__(self, default, len = 4, censor = ""):
		assert isinstance(default, int), "ConfigPIN default must be an integer"
		# deliberately calls ConfigSequence.__init__ (not ConfigInteger's)
		# so censor_char and the length-derived upper limit can be passed
		ConfigSequence.__init__(self, seperator = ":", limits = [(0, (10**len)-1)], censor_char = censor, default = default)
		self.len = len
	def getLength(self):
		return self.len
class ConfigFloat(ConfigSequence):
	"""A fixed-point number stored as [integer part, fractional part]."""
	def __init__(self, default, limits):
		ConfigSequence.__init__(self, seperator = ".", limits = limits, default = default)
	def getFloat(self):
		# the fractional part is scaled by (upper fractional limit + 1)
		return float(self.value[1] / float(self.limits[1][1] + 1) + self.value[0])
	float = property(getFloat)
# an editable text...
class ConfigText(ConfigElement, NumericalTextInput):
def __init__(self, default = "", fixed_size = True, visible_width = False):
ConfigElement.__init__(self)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
self.marked_pos = 0
self.allmarked = (default != "")
self.fixed_size = fixed_size
self.visible_width = visible_width
self.offset = 0
self.overwrite = fixed_size
self.help_window = None
self.value = self.last_value = self.default = default
def validateMarker(self):
textlen = len(self.text)
if self.fixed_size:
if self.marked_pos > textlen-1:
self.marked_pos = textlen-1
else:
if self.marked_pos > textlen:
self.marked_pos = textlen
if self.marked_pos < 0:
self.marked_pos = 0
if self.visible_width:
if self.marked_pos < self.offset:
self.offset = self.marked_pos
if self.marked_pos >= self.offset + self.visible_width:
if self.marked_pos == textlen:
self.offset = self.marked_pos - self.visible_width
else:
self.offset = self.marked_pos - self.visible_width + 1
if self.offset > 0 and self.offset + self.visible_width > textlen:
self.offset = max(0, len - self.visible_width)
def insertChar(self, ch, pos, owr):
if owr or self.overwrite:
self.text = self.text[0:pos] + ch + self.text[pos + 1:]
elif self.fixed_size:
self.text = self.text[0:pos] + ch + self.text[pos:-1]
else:
self.text = self.text[0:pos] + ch + self.text[pos:]
def deleteChar(self, pos):
if not self.fixed_size:
self.text = self.text[0:pos] + self.text[pos + 1:]
elif self.overwrite:
self.text = self.text[0:pos] + " " + self.text[pos + 1:]
else:
self.text = self.text[0:pos] + self.text[pos + 1:] + " "
def deleteAllChars(self):
if self.fixed_size:
self.text = " " * len(self.text)
else:
self.text = ""
self.marked_pos = 0
def handleKey(self, key):
# this will no change anything on the value itself
# so we can handle it here in gui element
if key == KEY_DELETE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
self.deleteChar(self.marked_pos)
if self.fixed_size and self.overwrite:
self.marked_pos += 1
elif key == KEY_BACKSPACE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
elif self.marked_pos > 0:
self.deleteChar(self.marked_pos-1)
if not self.fixed_size and self.offset > 0:
self.offset -= 1
self.marked_pos -= 1
elif key == KEY_LEFT:
self.timeout()
if self.allmarked:
self.marked_pos = len(self.text)
self.allmarked = False
else:
self.marked_pos -= 1
elif key == KEY_RIGHT:
self.timeout()
if self.allmarked:
self.marked_pos = 0
self.allmarked = False
else:
self.marked_pos += 1
elif key == KEY_HOME:
self.timeout()
self.allmarked = False
self.marked_pos = 0
elif key == KEY_END:
self.timeout()
self.allmarked = False
self.marked_pos = len(self.text)
elif key == KEY_TOGGLEOW:
self.timeout()
self.overwrite = not self.overwrite
elif key == KEY_ASCII:
self.timeout()
newChar = unichr(getPrevAsciiCode())
if not self.useableChars or newChar in self.useableChars:
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
elif key in KEY_NUMBERS:
owr = self.lastKey == getKeyNumber(key)
newChar = self.getKey(getKeyNumber(key))
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, owr)
elif key == KEY_TIMEOUT:
self.timeout()
if self.help_window:
self.help_window.update(self)
return
if self.help_window:
self.help_window.update(self)
self.validateMarker()
self.changed()
def nextFunc(self):
self.marked_pos += 1
self.validateMarker()
self.changed()
def getValue(self):
try:
return self.text.encode("utf-8")
except UnicodeDecodeError:
print "Broken UTF8!"
return self.text
def setValue(self, val):
try:
self.text = val.decode("utf-8")
except UnicodeDecodeError:
self.text = val.decode("utf-8", "ignore")
print "Broken UTF8!"
value = property(getValue, setValue)
_value = property(getValue, setValue)
def getText(self):
return self.text.encode("utf-8")
def getMulti(self, selected):
if self.visible_width:
if self.allmarked:
mark = range(0, min(self.visible_width, len(self.text)))
else:
mark = [self.marked_pos-self.offset]
return "mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark
else:
if self.allmarked:
mark = range(0, len(self.text))
else:
mark = [self.marked_pos]
return "mtext"[1-selected:], self.text.encode("utf-8")+" ", mark
def onSelect(self, session):
self.allmarked = (self.value != "")
if session is not None:
from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
self.help_window.setAnimationMode(0)
self.help_window.show()
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if self.help_window:
session.deleteDialog(self.help_window)
self.help_window = None
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
def getHTML(self, id):
return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
def unsafeAssign(self, value):
self.value = str(value)
class ConfigPassword(ConfigText):
	"""A ConfigText whose characters are masked unless it is selected."""
	def __init__(self, default = "", fixed_size = False, visible_width = False, censor = "*"):
		ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
		self.censor_char = censor
		self.hidden = True
	def getMulti(self, selected):
		mtext, text, mark = ConfigText.getMulti(self, selected)
		if self.hidden:
			text = self.censor_char * len(text)
		return mtext, text, mark
	def onSelect(self, session):
		# reveal the text while the user is editing it
		ConfigText.onSelect(self, session)
		self.hidden = False
	def onDeselect(self, session):
		ConfigText.onDeselect(self, session)
		self.hidden = True
# lets the user select between [min, min+stepwidth, min+(stepwidth*2)..., maxval] with maxval <= max depending
# on the stepwidth
# min, max, stepwidth, default are int values
# wraparound: pressing RIGHT key at max value brings you to min value and vice versa if set to True
class ConfigSelectionNumber(ConfigSelection):
	"""Pick from min, min+stepwidth, ... up to max (inclusive if reachable).

	With wraparound=True, stepping past either end wraps to the other one.
	The stored value is the string form; getValue converts back to int.
	"""
	def __init__(self, min, max, stepwidth, default = None, wraparound = False):
		self.wraparound = wraparound
		if default is None:
			default = min
		choices = []
		current = min
		while current <= max:
			choices.append(str(current))
			current += stepwidth
		ConfigSelection.__init__(self, choices, str(default))
	def getValue(self):
		return int(ConfigSelection.getValue(self))
	def setValue(self, val):
		ConfigSelection.setValue(self, str(val))
	value = property(getValue, setValue)
	def getIndex(self):
		return self.choices.index(self.value)
	index = property(getIndex)
	def isChanged(self):
		sv = self.saved_value
		strv = str(self.tostring(self.value))
		if sv is None and strv == str(self.default):
			return False
		return strv != str(sv)
	def handleKey(self, key):
		# without wraparound, stop dead at either end of the range
		if not self.wraparound:
			if key == KEY_RIGHT and self.choices.index(str(self.value)) + 1 == len(self.choices):
				return
			if key == KEY_LEFT and self.choices.index(str(self.value)) == 0:
				return
		nchoices = len(self.choices)
		if nchoices > 1:
			i = self.choices.index(str(self.value))
			if key == KEY_LEFT:
				self.value = self.choices[(i + nchoices - 1) % nchoices]
			elif key == KEY_RIGHT:
				self.value = self.choices[(i + 1) % nchoices]
			elif key == KEY_HOME:
				self.value = self.choices[0]
			elif key == KEY_END:
				self.value = self.choices[nchoices - 1]
class ConfigNumber(ConfigText):
	"""Arbitrary-length integer input built on top of ConfigText."""
	def __init__(self, default = 0):
		ConfigText.__init__(self, str(default), fixed_size = False)
	def getValue(self):
		try:
			return int(self.text)
		except ValueError:
			# Recover from junk text: a legacy boolean "true" becomes 1,
			# anything else falls back to the configured default.
			if self.text == "true":
				self.text = "1"
			else:
				self.text = str(self.default)	# BUGFIX: was str(default) -> NameError
			return int(self.text)
	def setValue(self, val):
		self.text = str(val)
	value = property(getValue, setValue)
	_value = property(getValue, setValue)
	def isChanged(self):
		sv = self.saved_value
		strv = self.tostring(self.value)
		if sv is None and strv == self.default:
			return False
		return strv != sv
	def conform(self):
		# Strip leading zeros while keeping the cursor on the same digit.
		pos = len(self.text) - self.marked_pos
		self.text = self.text.lstrip("0")
		if self.text == "":
			self.text = "0"
		if pos > len(self.text):
			self.marked_pos = 0
		else:
			self.marked_pos = len(self.text) - pos
	def handleKey(self, key):
		if key in KEY_NUMBERS or key == KEY_ASCII:
			if key == KEY_ASCII:
				ascii = getPrevAsciiCode()
				if not (48 <= ascii <= 57):
					return
			else:
				ascii = getKeyNumber(key) + 48
			newChar = unichr(ascii)
			if self.allmarked:
				self.deleteAllChars()
				self.allmarked = False
			self.insertChar(newChar, self.marked_pos, False)
			self.marked_pos += 1
		else:
			ConfigText.handleKey(self, key)
		self.conform()
	def onSelect(self, session):
		self.allmarked = (self.value != "")
	def onDeselect(self, session):
		self.marked_pos = 0
		self.offset = 0
		if not self.last_value == self.value:
			self.changedFinal()
			self.last_value = self.value
class ConfigSearchText(ConfigText):
	"""ConfigText variant whose numeric input uses search-style key mapping."""
	def __init__(self, default = "", fixed_size = False, visible_width = False):
		ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
		# re-initialise the text input in "search" mode
		NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False, search = True)
class ConfigDirectory(ConfigText):
	"""Shows a chosen directory; editing happens in a separate browser,
	so key presses are ignored here."""
	def __init__(self, default="", visible_width=60):
		ConfigText.__init__(self, default, fixed_size = True, visible_width = visible_width)
	def handleKey(self, key):
		pass
	def getValue(self):
		# empty text means "nothing selected"
		return None if self.text == "" else ConfigText.getValue(self)
	def setValue(self, val):
		ConfigText.setValue(self, "" if val is None else val)
	def getMulti(self, selected):
		if self.text == "":
			return "mtext"[1-selected:], _("List of storage devices"), range(0)
		return ConfigText.getMulti(self, selected)
	def onSelect(self, session):
		self.allmarked = (self.value != "")
# a slider.
class ConfigSlider(ConfigElement):
	"""An integer rendered as a slider between min and max."""
	def __init__(self, default = 0, increment = 1, limits = (0, 100)):
		ConfigElement.__init__(self)
		self.value = self.last_value = self.default = default
		self.min = limits[0]
		self.max = limits[1]
		self.increment = increment
	def checkValues(self, value = None):
		"""Clamp *value* (or the current value) into [min, max]."""
		if value is None:
			value = self.value
		clamped = min(max(value, self.min), self.max)
		if self.value != clamped: # avoid the setter when nothing changed
			self.value = clamped
	def handleKey(self, key):
		if key == KEY_HOME:
			self.value = self.min
		elif key == KEY_END:
			self.value = self.max
		elif key == KEY_LEFT:
			self.checkValues(self.value - self.increment)
		elif key == KEY_RIGHT:
			self.checkValues(self.value + self.increment)
	def getText(self):
		return "%d / %d" % (self.value, self.max)
	def getMulti(self, selected):
		self.checkValues()
		return "slider", self.value, self.max
	def fromstring(self, value):
		return int(value)
# a satlist. in fact, it's a ConfigSelection.
class ConfigSatlist(ConfigSelection):
	"""Satellite chooser: choices are (orbital position, description) pairs."""
	def __init__(self, list, default = None):
		if default is not None:
			default = str(default)
		choices = [(str(orbpos), desc) for (orbpos, desc, flags) in list]
		ConfigSelection.__init__(self, choices = choices, default = default)
	def getOrbitalPosition(self):
		# empty value -> no satellite selected
		if self.value == "":
			return None
		return int(self.value)
	orbital_position = property(getOrbitalPosition)
class ConfigSet(ConfigElement):
	"""Multi-selection: the value is the sorted list of enabled choice ids."""
	def __init__(self, choices, default=None):
		if not default: default = []
		ConfigElement.__init__(self)
		if isinstance(choices, list):
			choices.sort()
			self.choices = choicesList(choices, choicesList.LIST_TYPE_LIST)
		else:
			assert False, "ConfigSet choices must be a list!"
		# default is guaranteed to be a list here; the former
		# "if default is None" re-initialisation was unreachable dead code
		self.pos = -1
		default.sort()
		self.last_value = self.default = default
		self.value = default[:]
	def toggleChoice(self, choice):
		# Add/remove *choice* from the current value, keeping it sorted.
		value = self.value
		if choice in value:
			value.remove(choice)
		else:
			value.append(choice)
			value.sort()
		self.changed()
	def handleKey(self, key):
		if key in KEY_NUMBERS + [KEY_DELETE, KEY_BACKSPACE]:
			if self.pos != -1:
				self.toggleChoice(self.choices[self.pos])
		elif key == KEY_LEFT:
			if self.pos < 0:
				self.pos = len(self.choices)-1
			else:
				self.pos -= 1
		elif key == KEY_RIGHT:
			if self.pos >= len(self.choices)-1:
				self.pos = -1
			else:
				self.pos += 1
		elif key in (KEY_HOME, KEY_END):
			self.pos = -1
	def genString(self, lst):
		# space-separated descriptions of the given choice ids
		res = ""
		for x in lst:
			res += self.description[x]+" "
		return res
	def getText(self):
		return self.genString(self.value)
	def getMulti(self, selected):
		if not selected or self.pos == -1:
			return "text", self.genString(self.value)
		else:
			# render with the highlighted choice marked; a choice that is
			# not part of the value is shown in parentheses
			tmp = self.value[:]
			ch = self.choices[self.pos]
			mem = ch in self.value
			if not mem:
				tmp.append(ch)
				tmp.sort()
			ind = tmp.index(ch)
			val1 = self.genString(tmp[:ind])
			val2 = " "+self.genString(tmp[ind+1:])
			if mem:
				chstr = " "+self.description[ch]+" "
			else:
				chstr = "("+self.description[ch]+")"
			len_val1 = len(val1)
			return "mtext", val1+chstr+val2, range(len_val1, len_val1 + len(chstr))
	def onDeselect(self, session):
		self.pos = -1
		if not self.last_value == self.value:
			self.changedFinal()
		self.last_value = self.value[:]
	def tostring(self, value):
		return str(value)
	def fromstring(self, val):
		# NOTE: eval() of the saved settings string; acceptable only
		# because the settings file is trusted local input.
		return eval(val)
	description = property(lambda self: descriptionList(self.choices.choices, choicesList.LIST_TYPE_LIST))
class ConfigDictionarySet(ConfigElement):
	"""Stores a nested dict: {entry: {config_key: config_value}}.

	Used for per-item option sets; empty sub-dicts are pruned on save().
	"""
	def __init__(self, default = None):
		ConfigElement.__init__(self)
		# default used to be a mutable default argument ({}) shared by all
		# instances; build it per call and copy it into the value so later
		# edits cannot mutate the caller's (or the shared default) dict.
		if default is None:
			default = {}
		self.default = default
		self.dirs = {}
		self.value = dict(self.default)
	def getKeys(self):
		# NOTE(review): self.dir_pathes is never assigned in this class,
		# so this raises AttributeError when called — confirm the intended
		# attribute (possibly the keys of self.dirs).
		return self.dir_pathes
	def setValue(self, value):
		# non-dict assignments are silently ignored
		if isinstance(value, dict):
			self.dirs = value
			self.changed()
	def getValue(self):
		return self.dirs
	value = property(getValue, setValue)
	def tostring(self, value):
		return str(value)
	def fromstring(self, val):
		# NOTE: eval() of the saved settings string; acceptable only for
		# trusted local settings files.
		return eval(val)
	def load(self):
		sv = self.saved_value
		if sv is None:
			tmp = self.default
		else:
			tmp = self.fromstring(sv)
		self.dirs = tmp
	def changeConfigValue(self, value, config_key, config_value):
		"""Set dirs[value][config_key] = config_value, creating the entry."""
		if isinstance(value, str) and isinstance(config_key, str):
			if value in self.dirs:
				self.dirs[value][config_key] = config_value
			else:
				self.dirs[value] = {config_key : config_value}
			self.changed()
	def getConfigValue(self, value, config_key):
		"""Return dirs[value][config_key], or None when missing."""
		if isinstance(value, str) and isinstance(config_key, str):
			if value in self.dirs and config_key in self.dirs[value]:
				return self.dirs[value][config_key]
		return None
	def removeConfigValue(self, value, config_key):
		"""Delete dirs[value][config_key] if present."""
		if isinstance(value, str) and isinstance(config_key, str):
			if value in self.dirs and config_key in self.dirs[value]:
				try:
					del self.dirs[value][config_key]
				except KeyError:
					pass
				self.changed()
	def save(self):
		# drop entries whose option dict became empty, then serialize
		del_keys = []
		for key in self.dirs:
			if not len(self.dirs[key]):
				del_keys.append(key)
		for del_key in del_keys:
			try:
				del self.dirs[del_key]
			except KeyError:
				pass
			self.changed()
		self.saved_value = self.tostring(self.dirs)
class ConfigLocations(ConfigElement):
	"""Config element holding a list of filesystem locations.

	Each entry of self.locations is [path, mountpoint, currently_available,
	was_available]; paths on unmounted media are remembered but excluded
	from .value until their mountpoint reappears.
	"""
	def __init__(self, default=None, visible_width=False):
		if not default: default = []
		ConfigElement.__init__(self)
		self.visible_width = visible_width
		# pos == -1 means "no entry highlighted".
		self.pos = -1
		self.default = default
		self.locations = []
		self.mountpoints = []
		self.value = default[:]
	def setValue(self, value):
		# Merge the new path list into self.locations, keeping bookkeeping
		# rows for entries whose media is currently unavailable.
		locations = self.locations
		loc = [x[0] for x in locations if x[3]]
		add = [x for x in value if not x in loc]
		diff = add + [x for x in loc if not x in value]
		locations = [x for x in locations if not x[0] in diff] + [[x, self.getMountpoint(x), True, True] for x in add]
		#locations.sort(key = lambda x: x[0])
		self.locations = locations
		self.changed()
	def getValue(self):
		# Refresh availability flags first, then return available paths.
		self.checkChangedMountpoints()
		locations = self.locations
		for x in locations:
			x[3] = x[2]
		return [x[0] for x in locations if x[3]]
	value = property(getValue, setValue)
	def tostring(self, value):
		return str(value)
	def fromstring(self, val):
		# WARNING: eval() of the stored settings string (trusted local file).
		return eval(val)
	def load(self):
		sv = self.saved_value
		if sv is None:
			tmp = self.default
		else:
			tmp = self.fromstring(sv)
		locations = [[x, None, False, False] for x in tmp]
		self.refreshMountpoints()
		for x in locations:
			if fileExists(x[0]):
				x[1] = self.getMountpoint(x[0])
				x[2] = True
		self.locations = locations
	def save(self):
		locations = self.locations
		if self.save_disabled or not locations:
			self.saved_value = None
		else:
			self.saved_value = self.tostring([x[0] for x in locations])
	def isChanged(self):
		sv = self.saved_value
		locations = self.locations
		# BUGFIX: the original tested the undefined name 'val' here, which
		# raised NameError instead of taking the early-out when nothing has
		# ever been saved and no locations exist.
		if sv is None and not locations:
			return False
		return self.tostring([x[0] for x in locations]) != sv
	def addedMount(self, mp):
		for x in self.locations:
			if x[1] == mp:
				x[2] = True
			elif x[1] is None and fileExists(x[0]):
				x[1] = self.getMountpoint(x[0])
				x[2] = True
	def removedMount(self, mp):
		for x in self.locations:
			if x[1] == mp:
				x[2] = False
	def refreshMountpoints(self):
		# Longest mountpoint first so getMountpoint() matches the most
		# specific mount.
		self.mountpoints = [p.mountpoint for p in harddiskmanager.getMountedPartitions() if p.mountpoint != "/"]
		self.mountpoints.sort(key = lambda x: -len(x))
	def checkChangedMountpoints(self):
		oldmounts = self.mountpoints
		self.refreshMountpoints()
		newmounts = self.mountpoints
		if oldmounts == newmounts:
			return
		for x in oldmounts:
			if not x in newmounts:
				self.removedMount(x)
		for x in newmounts:
			if not x in oldmounts:
				self.addedMount(x)
	def getMountpoint(self, file):
		file = os_path.realpath(file)+"/"
		for m in self.mountpoints:
			if file.startswith(m):
				return m
		return None
	def handleKey(self, key):
		if key == KEY_LEFT:
			self.pos -= 1
			if self.pos < -1:
				self.pos = len(self.value)-1
		elif key == KEY_RIGHT:
			self.pos += 1
			if self.pos >= len(self.value):
				self.pos = -1
		elif key in (KEY_HOME, KEY_END):
			self.pos = -1
	def getText(self):
		return " ".join(self.value)
	def getMulti(self, selected):
		# Render the location list; when selected, mark the highlighted
		# entry and scroll it into view if a visible width is configured.
		if not selected:
			valstr = " ".join(self.value)
			if self.visible_width and len(valstr) > self.visible_width:
				return "text", valstr[0:self.visible_width]
			else:
				return "text", valstr
		else:
			i = 0
			valstr = ""
			ind1 = 0
			ind2 = 0
			for val in self.value:
				if i == self.pos:
					ind1 = len(valstr)
				valstr += str(val)+" "
				if i == self.pos:
					ind2 = len(valstr)
				i += 1
			if self.visible_width and len(valstr) > self.visible_width:
				if ind1+1 < self.visible_width/2:
					off = 0
				else:
					off = min(ind1+1-self.visible_width/2, len(valstr)-self.visible_width)
				return "mtext", valstr[off:off+self.visible_width], range(ind1-off,ind2-off)
			else:
				return "mtext", valstr, range(ind1,ind2)
	def onDeselect(self, session):
		self.pos = -1
class ConfigNothing(ConfigSelection):
	"""Placeholder element with a single empty choice, for config-list rows
	that need an element object but have nothing to edit."""
	def __init__(self):
		ConfigSelection.__init__(self, choices = [("","")])
# until here, 'saved_value' always had to be a *string*.
# now, in ConfigSubsection, and only there, saved_value
# is a dict, essentially forming a tree.
#
# config.foo.bar=True
# config.foobar=False
#
# turns into:
# config.saved_value == {"foo": {"bar": "True"}, "foobar": "False"}
#
class ConfigSubsectionContent(object):
	"""Plain attribute container backing ConfigSubsection; kept separate so
	ConfigSubsection can override __setattr__ without recursing into itself."""
	pass
# we store a backup of the loaded configuration
# data in self.stored_values, to be able to deploy
# them when a new config element will be added,
# so non-default values are instantly available
# A list, for example:
# config.dipswitches = ConfigSubList()
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
class ConfigSubList(list, object):
	"""A list of config elements, serialized as {"0": ..., "1": ...}.

	Values deserialized before an element exists are kept in stored_values
	and applied as soon as the element is appended.
	"""
	def __init__(self):
		list.__init__(self)
		self.stored_values = {}
	def save(self):
		for element in self:
			element.save()
	def load(self):
		for element in self:
			element.load()
	def getSavedValue(self):
		# Serialize each element that has something to save, keyed by its
		# stringified list index.
		result = {}
		for index, element in enumerate(self):
			stored = element.saved_value
			if stored is not None:
				result[str(index)] = stored
		return result
	def setSavedValue(self, values):
		self.stored_values = dict(values)
		for key, stored in self.stored_values.items():
			index = int(key)
			if index < len(self):
				self[index].saved_value = stored
	saved_value = property(getSavedValue, setSavedValue)
	def append(self, item):
		# Attach the element, then replay any value that was stored for its
		# index before the element existed.
		key = str(len(self))
		list.append(self, item)
		if key in self.stored_values:
			item.saved_value = self.stored_values[key]
			item.load()
	def dict(self):
		return {str(index): element for index, element in enumerate(self)}
# same as ConfigSubList, just as a dictionary.
# care must be taken that the 'key' has a proper
# str() method, because it will be used in the config
# file.
class ConfigSubDict(dict, object):
	"""A dict of config elements; serialized keyed by str(key), so keys must
	have a stable str() representation.

	Values deserialized before an element exists are kept in stored_values
	and applied as soon as the element is inserted.
	"""
	def __init__(self):
		dict.__init__(self)
		self.stored_values = {}
	def save(self):
		for element in self.values():
			element.save()
	def load(self):
		for element in self.values():
			element.load()
	def getSavedValue(self):
		result = {}
		for key, element in self.items():
			stored = element.saved_value
			if stored is not None:
				result[str(key)] = stored
		return result
	def setSavedValue(self, values):
		self.stored_values = dict(values)
		for key, element in self.items():
			skey = str(key)
			if skey in self.stored_values:
				element.saved_value = self.stored_values[skey]
	saved_value = property(getSavedValue, setSavedValue)
	def __setitem__(self, key, item):
		# Attach the element, then replay any value that was stored for its
		# key before the element existed.
		dict.__setitem__(self, key, item)
		if str(key) in self.stored_values:
			item.saved_value = self.stored_values[str(key)]
			item.load()
	def dict(self):
		return self
# Like the classes above, just with a more "native"
# syntax.
#
# some evil stuff must be done to allow instant
# loading of added elements. this is why this class
# is so complex.
#
# we need the 'content' because we overwrite
# __setattr__.
# If you don't understand this, try adding
# __setattr__ to a usual existing class and you will.
class ConfigSubsection(object):
	"""A named subtree of config elements accessed with attribute syntax
	(config.foo.bar).  Values loaded before an element exists are kept in
	content.stored_values and applied when the element is attached."""
	def __init__(self):
		# Write through __dict__ to bypass our own __setattr__ while the
		# backing store is being created.
		self.__dict__["content"] = ConfigSubsectionContent()
		self.content.items = { }
		self.content.stored_values = { }
	def __setattr__(self, name, value):
		# "saved_value" is virtual: it (de)serializes the whole subtree.
		if name == "saved_value":
			return self.setSavedValue(value)
		assert isinstance(value, (ConfigSubsection, ConfigElement, ConfigSubList, ConfigSubDict)), "ConfigSubsections can only store ConfigSubsections, ConfigSubLists, ConfigSubDicts or ConfigElements"
		content = self.content
		content.items[name] = value
		# Replay any value stored for this name before the element existed.
		x = content.stored_values.get(name, None)
		if x is not None:
			#print "ok, now we have a new item,", name, "and have the following value for it:", x
			value.saved_value = x
			value.load()
	def __getattr__(self, name):
		return self.content.items[name]
	def getSavedValue(self):
		# Merge live element values into the stored tree; note this mutates
		# (and returns) content.stored_values itself, dropping entries whose
		# element currently has nothing to save.
		res = self.content.stored_values
		for (key, val) in self.content.items.items():
			sv = val.saved_value
			if sv is not None:
				res[key] = sv
			elif key in res:
				del res[key]
		return res
	def setSavedValue(self, values):
		values = dict(values)
		self.content.stored_values = values
		for (key, val) in self.content.items.items():
			value = values.get(key, None)
			if value is not None:
				val.saved_value = value
	saved_value = property(getSavedValue, setSavedValue)
	def save(self):
		for x in self.content.items.values():
			x.save()
	def load(self):
		for x in self.content.items.values():
			x.load()
	def dict(self):
		return self.content.items
# the root config object, which also can "pickle" (=serialize)
# down the whole config tree.
#
# we try to keep non-existing config entries, to apply them whenever
# a new config entry is added to a subsection
# also, non-existing config entries will be saved, so they won't be
# lost when a config entry disappears.
class Config(ConfigSubsection):
	"""Root of the configuration tree; (un)serializes the whole tree to the
	flat "config.a.b=value" settings-file format."""
	def __init__(self):
		ConfigSubsection.__init__(self)
	def pickle_this(self, prefix, topickle, result):
		# Recursively flatten the saved-value tree into "name=value\n"
		# fragments; numeric keys sort numerically so list indices keep
		# their natural order.
		for (key, val) in sorted(topickle.items(), key=lambda x: int(x[0]) if x[0].isdigit() else x[0].lower()):
			name = '.'.join((prefix, key))
			if isinstance(val, dict):
				self.pickle_this(name, val, result)
			elif isinstance(val, tuple):
				result += [name, '=', str(val[0]), '\n']
			else:
				result += [name, '=', str(val), '\n']
	def pickle(self):
		result = []
		self.pickle_this("config", self.saved_value, result)
		return ''.join(result)
	def unpickle(self, lines, base_file=True):
		tree = { }
		configbase = tree.setdefault("config", {})
		for l in lines:
			# Skip blank lines and comments.
			if not l or l[0] == '#':
				continue
			result = l.split('=', 1)
			if len(result) != 2:
				continue
			(name, val) = result
			val = val.strip()
			#convert old settings
			if l.startswith("config.Nims."):
				# Legacy tuner keys get renamed into per-system
				# (dvbs/dvbc/dvbt) sub-namespaces.
				tmp = name.split('.')
				if tmp[3] == "cable":
					tmp[3] = "dvbc"
				elif tmp[3].startswith ("cable"):
					tmp[3] = "dvbc." + tmp[3]
				elif tmp[3].startswith("terrestrial"):
					tmp[3] = "dvbt." + tmp[3]
				else:
					if tmp[3] not in ('dvbs', 'dvbc', 'dvbt', 'multiType'):
						tmp[3] = "dvbs." + tmp[3]
				name =".".join(tmp)
			names = name.split('.')
			base = configbase
			for n in names[1:-1]:
				base = base.setdefault(n, {})
			base[names[-1]] = val
			if not base_file: # not the initial config file..
				#update config.x.y.value when exist
				# WARNING: eval() of a name read from the settings file;
				# acceptable only because that file is trusted local data.
				try:
					configEntry = eval(name)
					if configEntry is not None:
						configEntry.value = val
				except (SyntaxError, KeyError):
					pass
		# we inherit from ConfigSubsection, so ...
		#object.__setattr__(self, "saved_value", tree["config"])
		if "config" in tree:
			self.setSavedValue(tree["config"])
	def saveToFile(self, filename):
		# Write to a side file, flush+fsync, then rename: an atomic replace
		# so a crash mid-write can't truncate the settings.
		text = self.pickle()
		try:
			import os
			f = open(filename + ".writing", "w")
			f.write(text)
			f.flush()
			os.fsync(f.fileno())
			f.close()
			os.rename(filename + ".writing", filename)
		except IOError:
			print "Config: Couldn't write %s" % filename
	def loadFromFile(self, filename, base_file=True):
		self.unpickle(open(filename, "r"), base_file)
# Module-level singleton: the global configuration tree.  The rest of the
# application attaches subsections to it at import time.
config = Config()
config.misc = ConfigSubsection()
class ConfigFile:
	"""Thin wrapper around the global 'config' tree that knows the on-disk
	settings location and can resolve dotted "config.a.b" key strings."""
	def __init__(self):
		pass
	# Class attribute: resolved path of the settings file.
	CONFIG_FILE = resolveFilename(SCOPE_CONFIG, "settings")
	def load(self):
		try:
			config.loadFromFile(self.CONFIG_FILE, True)
		except IOError, e:
			print "unable to load config (%s), assuming defaults..." % str(e)
	def save(self):
		# config.save()
		config.saveToFile(self.CONFIG_FILE)
	def __resolveValue(self, pickles, cmap):
		# Walk the dotted key path through the nested .dict() mappings and
		# return the leaf's value as a string, or None when missing.
		key = pickles[0]
		if cmap.has_key(key):
			if len(pickles) > 1:
				return self.__resolveValue(pickles[1:], cmap[key].dict())
			else:
				return str(cmap[key].value)
		return None
	def getResolvedKey(self, key):
		names = key.split('.')
		if len(names) > 1:
			if names[0] == "config":
				ret=self.__resolveValue(names[1:], config.content.items)
				if ret and len(ret):
					return ret
		print "getResolvedKey", key, "failed !! (Typo??)"
		return ""
def NoSave(element):
	"""Mark *element* as non-persistent and return it, so it can be used
	inline: config.foo = NoSave(ConfigYesNo())."""
	element.disableSave()
	return element
# Module-level singleton: load the settings file once at import time.
configfile = ConfigFile()
configfile.load()
def getConfigListEntry(*args):
	"""Pack (description, configElement, ...) into the tuple format consumed
	by ConfigList screens."""
	assert len(args) > 1, "getConfigListEntry needs a minimum of two arguments (descr, configElement)"
	return args
def updateConfigElement(element, newelement):
	"""Copy the current value of *element* onto *newelement* and return the
	latter; used when replacing a config element in place."""
	newelement.value = element.value
	return newelement
#def _(x):
# return x
#
#config.bla = ConfigSubsection()
#config.bla.test = ConfigYesNo()
#config.nim = ConfigSubList()
#config.nim.append(ConfigSubsection())
#config.nim[0].bla = ConfigYesNo()
#config.nim.append(ConfigSubsection())
#config.nim[1].bla = ConfigYesNo()
#config.nim[1].blub = ConfigYesNo()
#config.arg = ConfigSubDict()
#config.arg["Hello"] = ConfigYesNo()
#
#config.arg["Hello"].handleKey(KEY_RIGHT)
#config.arg["Hello"].handleKey(KEY_RIGHT)
#
##config.saved_value
#
##configfile.save()
#config.save()
#print config.pickle()
# Each of the four HDMI-CEC address blocks is a nibble (0..15).
cec_limits = [(0,15),(0,15),(0,15),(0,15)]
class ConfigCECAddress(ConfigSequence):
	"""Dotted four-block HDMI-CEC address editor (each block 0..15), with
	optional auto-advance to the next block when a typed digit overflows."""
	def __init__(self, default, auto_jump = False):
		# NOTE: "seperator" is the (misspelled) keyword expected by
		# ConfigSequence; do not "fix" it here.
		ConfigSequence.__init__(self, seperator = ".", limits = cec_limits, default = default)
		# Maximum digit count per block, derived from each upper limit.
		self.block_len = [len(str(x[1])) for x in self.limits]
		self.marked_block = 0
		# overwrite == True: the next digit replaces the block's value.
		self.overwrite = True
		self.auto_jump = auto_jump
	def handleKey(self, key):
		if key == KEY_LEFT:
			if self.marked_block > 0:
				self.marked_block -= 1
			self.overwrite = True
		elif key == KEY_RIGHT:
			if self.marked_block < len(self.limits)-1:
				self.marked_block += 1
			self.overwrite = True
		elif key == KEY_HOME:
			self.marked_block = 0
			self.overwrite = True
		elif key == KEY_END:
			self.marked_block = len(self.limits)-1
			self.overwrite = True
		elif key in KEY_NUMBERS or key == KEY_ASCII:
			if key == KEY_ASCII:
				# Accept only ASCII digits '0'..'9'.
				code = getPrevAsciiCode()
				if code < 48 or code > 57:
					return
				number = code - 48
			else:
				number = getKeyNumber(key)
			oldvalue = self._value[self.marked_block]
			if self.overwrite:
				# First digit after entering the block replaces its value.
				self._value[self.marked_block] = number
				self.overwrite = False
			else:
				oldvalue *= 10
				newvalue = oldvalue + number
				if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
					# Digit doesn't fit this block: move right, retry it.
					self.handleKey(KEY_RIGHT)
					self.handleKey(key)
					return
				else:
					self._value[self.marked_block] = newvalue
			# Block full: advance automatically.
			if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
				self.handleKey(KEY_RIGHT)
			self.validate()
			self.changed()
	def genText(self):
		# Build the dotted display string plus the character range of the
		# marked block for highlighting.
		value = ""
		block_strlen = []
		for i in self._value:
			block_strlen.append(len(str(i)))
			if value:
				value += self.seperator
			value += str(i)
		leftPos = sum(block_strlen[:self.marked_block])+self.marked_block
		rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
		mBlock = range(leftPos, rightPos)
		return value, mBlock
	def getMulti(self, selected):
		(value, mBlock) = self.genText()
		if self.enabled:
			# "mtext" when selected, "text" otherwise ("mtext"[1:] == "text").
			return "mtext"[1-selected:], value, mBlock
		else:
			return "text", value
	def getHTML(self, id):
		# we definitely don't want leading zeros
		return '.'.join(["%d" % d for d in self.value])
class ConfigAction(ConfigElement):
	"""Pseudo config entry that invokes a callback when OK is pressed,
	instead of holding an editable value."""
	def __init__(self, action, *args):
		ConfigElement.__init__(self)
		self.value = "(OK)"
		self.action = action
		self.actionargs = args
	def handleKey(self, key):
		# Every key except OK is ignored.
		if key != KEY_OK:
			return
		self.action(*self.actionargs)
	def getMulti(self, dummy):
		# Nothing to render: the action has no value display.
		pass
``` |
[
{
"content": "```python\nimport os\nimport vtkAll as vtk\nimport math\nimport time\nimport re\nimport numpy as np\n\nfrom director.timercallback import TimerCallback\nfrom director import objectmodel as om\nfrom director.simpletimer import SimpleTimer\nfrom director.utime import getUtime\nfrom director import r... | [
{
"content": "<|memory_start|>```python\nimport os\nimport vtkAll as vtk\nimport math\nimport time\nimport re\nimport numpy as np\n\nfrom director.timercallback import TimerCallback\nfrom director import objectmodel as om\nfrom director.simpletimer import SimpleTimer\nfrom director.utime import getUtime\nfrom d... | ```python
import os
import vtkAll as vtk
import math
import time
import re
import numpy as np
from director.timercallback import TimerCallback
from director import objectmodel as om
from director.simpletimer import SimpleTimer
from director.utime import getUtime
from director import robotstate
import copy
import pickle
import scipy.interpolate
def asRobotPlan(msg):
    """Unwrap a robot_plan_with_supports_t into its inner plan message.

    Any other message type (or an environment without the drc lcmtypes)
    passes the input through unchanged, so callers can treat both message
    flavors uniformly.
    """
    try:
        import drc as lcmdrc
    except ImportError:
        return msg
    if isinstance(msg, lcmdrc.robot_plan_with_supports_t):
        return msg.plan
    return msg
class PlanPlayback(object):
    """Plays back, samples, pickles and plots robot plan messages.

    Poses between plan knot points are interpolated with the scipy method
    named by self.interpolationMethod ('slinear', 'quadratic', 'cubic' or
    'pchip').
    """

    def __init__(self):
        self.animationCallback = None
        self.animationTimer = None
        self.interpolationMethod = 'slinear'
        self.playbackSpeed = 1.0
        # Optional regex restricting which joints plotPoses() draws.
        self.jointNameRegex = ''

    @staticmethod
    def getPlanPoses(msgOrList):
        """Return (poseTimes, poses) for one plan message or a list of them.

        For a list, consecutive plans are concatenated on the time axis and
        the duplicated boundary sample of each following plan is dropped.
        """
        if isinstance(msgOrList, list):
            messages = msgOrList
            allPoseTimes, allPoses = PlanPlayback.getPlanPoses(messages[0])
            for msg in messages[1:]:
                poseTimes, poses = PlanPlayback.getPlanPoses(msg)
                poseTimes += allPoseTimes[-1]
                allPoseTimes = np.hstack((allPoseTimes, poseTimes[1:]))
                allPoses += poses[1:]
            return allPoseTimes, allPoses
        else:
            msg = asRobotPlan(msgOrList)
            poses = []
            poseTimes = []
            for plan in msg.plan:
                pose = robotstate.convertStateMessageToDrakePose(plan)
                # utimes are microseconds; times are reported in seconds.
                poseTimes.append(plan.utime / 1e6)
                poses.append(pose)
            return np.array(poseTimes), poses

    @staticmethod
    def getPlanElapsedTime(msg):
        """Duration of the plan in seconds."""
        msg = asRobotPlan(msg)
        startTime = msg.plan[0].utime
        endTime = msg.plan[-1].utime
        return (endTime - startTime) / 1e6

    @staticmethod
    def mergePlanMessages(plans):
        """Deep-copy and concatenate plan messages into one plan, shifting
        each following plan's utimes past the previous plan's end."""
        msg = copy.deepcopy(plans[0])
        for plan in plans[1:]:
            plan = copy.deepcopy(plan)
            lastTime = msg.plan[-1].utime
            for state in plan.plan:
                state.utime += lastTime
            msg.plan_info += plan.plan_info
            msg.plan += plan.plan
        msg.num_states = len(msg.plan)
        return msg

    @staticmethod
    def isPlanInfoFeasible(info):
        # Info codes 0..9 denote feasible results.
        return 0 <= info < 10

    @staticmethod
    def isPlanFeasible(plan):
        plan = asRobotPlan(plan)
        return plan is not None and (max(plan.plan_info) < 10 and min(plan.plan_info) >= 0)

    def stopAnimation(self):
        if self.animationTimer:
            self.animationTimer.stop()

    def setInterpolationMethod(self, method):
        # BUGFIX: the original signature was missing 'self', so any call on
        # an instance bound the instance to 'method' and then raised
        # NameError on the undefined 'self'.
        self.interpolationMethod = method

    def playPlan(self, msg, jointController):
        self.playPlans([msg], jointController)

    def playPlans(self, messages, jointController):
        assert len(messages)
        poseTimes, poses = self.getPlanPoses(messages)
        self.playPoses(poseTimes, poses, jointController)

    def getPoseInterpolatorFromPlan(self, message):
        poseTimes, poses = self.getPlanPoses(message)
        return self.getPoseInterpolator(poseTimes, poses)

    def getPoseInterpolator(self, poseTimes, poses, unwrap_rpy=True):
        """Build a callable t -> pose over the given knot points.

        When unwrap_rpy is True the rpy columns (3:6) are unwrapped first so
        interpolation never takes the long way around +/-pi.
        """
        if unwrap_rpy:
            poses = np.array(poses, copy=True)
            poses[:,3:6] = np.unwrap(poses[:,3:6],axis=0)
        if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
            f = scipy.interpolate.interp1d(poseTimes, poses, axis=0, kind=self.interpolationMethod)
        elif self.interpolationMethod == 'pchip':
            f = scipy.interpolate.PchipInterpolator(poseTimes, poses, axis=0)
        else:
            # Previously an unknown method fell through and raised a
            # confusing NameError on 'f'; fail explicitly instead.
            raise ValueError('unknown interpolation method: %s' % self.interpolationMethod)
        return f

    def getPlanPoseMeshes(self, messages, jointController, robotModel, numberOfSamples):
        """Sample the plan and return one robot mesh polyData per sample."""
        poseTimes, poses = self.getPlanPoses(messages)
        f = self.getPoseInterpolator(poseTimes, poses)
        sampleTimes = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)
        meshes = []
        for sampleTime in sampleTimes:
            pose = f(sampleTime)
            jointController.setPose('plan_playback', pose)
            polyData = vtk.vtkPolyData()
            robotModel.model.getModelMesh(polyData)
            meshes.append(polyData)
        return meshes

    def showPoseAtTime(self, time, jointController, poseInterpolator):
        pose = poseInterpolator(time)
        jointController.setPose('plan_playback', pose)

    def playPoses(self, poseTimes, poses, jointController):
        """Animate the poses on jointController at self.playbackSpeed."""
        f = self.getPoseInterpolator(poseTimes, poses)
        timer = SimpleTimer()

        def updateAnimation():
            tNow = timer.elapsed() * self.playbackSpeed
            if tNow > poseTimes[-1]:
                # Past the end: pin the final pose and stop the timer.
                pose = poses[-1]
                jointController.setPose('plan_playback', pose)
                if self.animationCallback:
                    self.animationCallback()
                return False
            pose = f(tNow)
            jointController.setPose('plan_playback', pose)
            if self.animationCallback:
                self.animationCallback()

        self.animationTimer = TimerCallback()
        self.animationTimer.targetFps = 60
        self.animationTimer.callback = updateAnimation
        self.animationTimer.start()
        updateAnimation()

    def picklePlan(self, filename, msg):
        """Pickle (poseTimes, poses) of the plan to *filename*."""
        poseTimes, poses = self.getPlanPoses(msg)
        # BUGFIX: the original leaked the file handle and opened in text
        # mode; pickle requires a binary stream on Python 3.
        with open(filename, 'wb') as f:
            pickle.dump((poseTimes, poses), f)

    def getMovingJointNames(self, msg):
        """Names of the joints whose value changes anywhere in the plan."""
        poseTimes, poses = self.getPlanPoses(msg)
        diffs = np.diff(poses, axis=0)
        jointIds = np.unique(np.where(diffs != 0.0)[1])
        jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
        return jointNames

    def plotPlan(self, msg):
        poseTimes, poses = self.getPlanPoses(msg)
        self.plotPoses(poseTimes, poses)

    def plotPoses(self, poseTimes, poses):
        """Plot joint trajectories and their interpolation with matplotlib."""
        import matplotlib.pyplot as plt

        poses = np.array(poses)
        if self.jointNameRegex:
            jointIds = range(poses.shape[1])
        else:
            # Only joints that actually move.
            diffs = np.diff(poses, axis=0)
            jointIds = np.unique(np.where(diffs != 0.0)[1])
        jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
        jointTrajectories = [poses[:,jointId] for jointId in jointIds]

        seriesNames = []
        sampleResolutionInSeconds = 0.01
        # BUGFIX: np.linspace requires an integer sample count; the original
        # passed the float division result.
        numberOfSamples = int((poseTimes[-1] - poseTimes[0]) / sampleResolutionInSeconds)
        xnew = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)

        fig = plt.figure()
        ax = fig.add_subplot(111)

        for jointId, jointName, jointTrajectory in zip(jointIds, jointNames, jointTrajectories):
            if self.jointNameRegex and not re.match(self.jointNameRegex, jointName):
                continue
            x = poseTimes
            y = np.rad2deg(jointTrajectory)
            if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
                f = scipy.interpolate.interp1d(x, y, kind=self.interpolationMethod)
            elif self.interpolationMethod == 'pchip':
                f = scipy.interpolate.PchipInterpolator(x, y)
            ax.plot(x, y, 'ko')
            seriesNames.append(jointName + ' points')
            ax.plot(xnew, f(xnew), '-')
            seriesNames.append(jointName + ' ' + self.interpolationMethod)

        ax.legend(seriesNames, loc='upper right').draggable()
        ax.set_xlabel('time (s)')
        ax.set_ylabel('joint angle (deg)')
        ax.set_title('joint trajectories')
        plt.show()
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\nfrom ..utils import TranspileTestCase\n\n\nclass AssignmentTests(TranspileTestCase):\n def test_simple_assignment(self):\n self.assertCodeExecution(\"\"\"\n x = 42\n print(x)\n print('Done... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\nfrom ..utils import TranspileTestCase\n\n\nclass AssignmentTests(TranspileTestCase):\n def test_simple_assignment(self):\n self.assertCodeExecution(\"\"\"\n x = 42\n print(x)\n ... | ```python
from ..utils import TranspileTestCase
class AssignmentTests(TranspileTestCase):
    """Transpiler regression tests covering assignment statement forms.

    Each case hands a small program to assertCodeExecution, which runs it
    under both CPython and the transpiled output and compares the results.
    """
    def test_simple_assignment(self):
        self.assertCodeExecution("""
            x = 42
            print(x)
            print('Done.')
            """)
    def test_multiple_assignment(self):
        # Chained targets: x = y = 42.
        self.assertCodeExecution("""
            x = y = 42
            print(x, y)
            print('Done.')
            """)
    def test_old_style_conditional_assignment(self):
        # The pre-2.5 "value = x or default" idiom.
        self.assertCodeExecution("""
            x = 42
            y = x or 37
            print(y)
            x = 0
            y = x or 37
            print(y)
            print('Done.')
            """)
    def test_conditional_assignment(self):
        # The ternary conditional expression.
        self.assertCodeExecution("""
            x = 42
            y = 99 if x else 37
            print(y)
            x = 0
            y = 99 if x else 37
            print(y)
            print('Done.')
            """)
    def test_access_potentially_unassigned(self):
        # 'y' is only bound on one branch but is defined at use time here.
        self.assertCodeExecution("""
            x = 37
            if x > 0:
                y = 42
            print(y)
            print('Done.')
            """)
    def test_use_potentially_unassigned(self):
        # 'y' is intentionally never bound: exercises unbound-name handling.
        self.assertCodeExecution("""
            x = 37
            if y > 0:
                print("Yes")
            else:
                print("No")
            print('Done.')
            """)
    def test_assign_to_argument(self):
        # Rebinding a function parameter inside the function body.
        self.assertCodeExecution("""
            def foo(arg):
                val = arg + 10
                arg = val - 2
                return arg
            print(foo(20))
            print('Done.')
            """)
    def test_list_assignment(self):
        self.assertCodeExecution("""
            [x, y, z] = range(3)
            print(x)
            print(y)
            print(z)
            print('Done.')
            """)
    def test_tuple_assignment(self):
        self.assertCodeExecution("""
            (x, y, z) = range(3)
            print(x)
            print(y)
            print(z)
            print('Done.')
            """)
    def test_implied_tuple_assignment(self):
        # Tuple unpacking without parentheses.
        self.assertCodeExecution("""
            x, y, z = range(3)
            print(x)
            print(y)
            print(z)
            print('Done.')
            """)
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\nOLD_LISTING_MODE = 'OLD_LISTING_MODE'\nLIVE_EVENTS_MODE = 'LIVE_EVENTS'\nPLAY_MODE = 'PLAY'\nPLAY_ITEM_MODE = 'PLAY_ITEM'\nPLAY_TV_MODE = 'PLAY_TV'\n\nUPCOMING_MODE = 'UPCOMING'\nAUTHENTICATE_MODE = 'AUTHENTICATE'\nAUTHENTICATION_DETAILS_MODE = 'AUT... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nOLD_LISTING_MODE = 'OLD_LISTING_MODE'\nLIVE_EVENTS_MODE = 'LIVE_EVENTS'\nPLAY_MODE = 'PLAY'\nPLAY_ITEM_MODE = 'PLAY_ITEM'\nPLAY_TV_MODE = 'PLAY_TV'\n\nUPCOMING_MODE = 'UPCOMING'\nAUTHENTICATE_MODE = 'AUTHENTICATE'\nAUTHENTICATION_DET... | ```python
# Plugin routing modes: these string values appear in addon URLs, so they
# must stay stable across releases.
OLD_LISTING_MODE = 'OLD_LISTING_MODE'
LIVE_EVENTS_MODE = 'LIVE_EVENTS'
PLAY_MODE = 'PLAY'
PLAY_ITEM_MODE = 'PLAY_ITEM'
PLAY_TV_MODE = 'PLAY_TV'
UPCOMING_MODE = 'UPCOMING'
AUTHENTICATE_MODE = 'AUTHENTICATE'
AUTHENTICATION_DETAILS_MODE = 'AUTHENTICATION_DETAILS'
# Query-string parameter names used when building/parsing plugin URLs.
NETWORK_ID = 'NETWORK_ID'
EVENT_ID = 'EVENT_ID'
SIMULCAST_AIRING_ID = 'SIMULCAST_AIRING_ID'
SESSION_URL = 'SESSION_URL'
DESKTOP_STREAM_SOURCE = 'DESKTOP_STREAM_SOURCE'
NETWORK_NAME = 'NETWORK_NAME'
EVENT_NAME = 'EVENT_NAME'
EVENT_GUID = 'EVENT_GUID'
ADOBE_RSS = 'ADOBE_RSS'
EVENT_PARENTAL_RATING = 'EVENT_PARENTAL_RATING'
SHELF_ID = 'SHELF_ID'
SHOWCASE_URL = 'SHOWCASE_URL'
SHOWCASE_NAV_ID = 'SHOWCASE_NAV_ID'
PLAYBACK_URL = 'PLAYBACK_URL'
REFRESH_LIVE_MODE = 'REFRESH_LIVE_MODE'
CHANNEL_RESOURCE_ID = 'CHANNEL_RESOURCE_ID'
ESPN_URL = 'ESPN_URL'
MODE = 'MODE'
SPORT = 'SPORT'
# XML namespace for BAM media responses.
BAM_NS = '{http://services.bamnetworks.com/media/types/2.1}'
# Taken from https://espn.go.com/watchespn/player/config
ESPN3_ID = 'n360'
SECPLUS_ID = 'n323'
ACC_EXTRA_ID = 'n321'
# Maps addon setting names to internal channel identifiers.
CHANNEL_SETTINGS = {
    'ShowEspn1': 'espn1',
    'ShowEspn2': 'espn2',
    'ShowEspn3': 'espn3',
    'ShowEspnu': 'espnu',
    'ShowEspnews': 'espnews',
    'ShowEspnDeportes': 'espndeportes',
    'ShowSec': 'sec',
    'ShowSecPlus': 'secplus',
    'ShowLonghorn': 'longhorn',
    'ShowBuzzerBeater': 'buzzerbeater',
    'ShowAccExtra': 'accextra',
    'ShowGoalLine': 'goalline',
    'ShowAcc': 'acc',
}
# Channel id -> numeric label id (presumably Kodi localized-string ids for
# the channel names — confirm against the addon's strings.po).  Note:
# 'buzzerbeater' and 'espndeportes' have no entry here.
NETWORK_ID_TO_NETWORK_NAME = {
    'espn1': 30990,
    'espn2': 30991,
    'espn3': 30992,
    'espnu': 30993,
    'espnews': 30994,
    'espndeportes': 30995,
    'sec': 30996,
    'longhorn': 30998,
    'accextra': 30989,
    'goalline': 30988,
    'secplus': 30997,
    'acc': 31000,
}
ID = 'id'
URL = 'url'
# WatchESPN product API endpoints (tvOS flavor) and legacy AppleTV feeds.
TV_OS_HOME = 'http://watch.product.api.espn.com/api/product/v1/tvos/watchespn/home'
TV_OS_CHANNELS = 'http://watch.product.api.espn.com/api/product/v1/tvos/watchespn/channels'
TV_OS_SPORTS = 'http://watch.product.api.espn.com/api/product/v1/tvos/watchespn/sports'
APPLE_TV_FEATURED = 'http://espn.go.com/watchespn/appletv/featured'
APPLE_TV_SPORTS = 'http://espn.go.com/watchespn/appletv/sports'
APPLE_TV_CHANNELS = 'http://espn.go.com/watchespn/appletv/channels'
WATCH_API_V1_TRENDING = 'http://watch.api.espn.com/v1/trending'
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\nimport math\nimport unittest\n\nfrom rainbowrunners.utils import Colors, get_terminal_size\n\n__all__ = (\n 'NyanCatResult',\n)\n\n\nclass BaseRainbowResult(unittest.TestResult):\n \"\"\"A test result class that can print rainbow and awesome pet to a str... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nimport math\nimport unittest\n\nfrom rainbowrunners.utils import Colors, get_terminal_size\n\n__all__ = (\n 'NyanCatResult',\n)\n\n\nclass BaseRainbowResult(unittest.TestResult):\n \"\"\"A test result class that can print rainbow and awes... | ```python
# -*- coding: utf-8 -*-
import math
import unittest
from rainbowrunners.utils import Colors, get_terminal_size
__all__ = (
'NyanCatResult',
)
class BaseRainbowResult(unittest.TestResult):
    """A test result class that can print rainbow and awesome pet to a stream.

    After every test a frame is drawn: a score column, four rainbow trail
    lines, then the pet (drawn by subclasses via draw_pet), using ANSI
    cursor-movement escapes to redraw in place.
    """
    separator1 = '\033[{0}m{1:*^70}\033[0m'.format(Colors.SEPARATOR1, '')
    separator2 = '\033[{0}m{1:-^70}\033[0m'.format(Colors.SEPARATOR2, '')
    def __init__(self, stream=None, descriptions=None, verbosity=None):
        super(BaseRainbowResult, self).__init__(
            stream=stream, descriptions=descriptions, verbosity=verbosity
        )
        self.stream = stream
        self.descriptions = descriptions
        # Draw within 80% of the terminal width.
        self.width = get_terminal_size()[0] * 0.8
        # tick alternates every frame to animate the trail and the pet.
        self.tick = False
        self.number_of_lines = 4
        # One list of colored trail segments per drawn line.
        self.trajectories = [[], [], [], []]
        self.pet_width = 11
        self.scoreboard_width = 5
        self.trajectory_width_max = self.width - self.pet_width
        self.rainbow_colors = self.generate_colors()
        self.color_index = 0
        self.success = 0
    def getDescription(self, test):
        doc_first_line = test.shortDescription()
        if self.descriptions and doc_first_line:
            return '\n'.join((str(test), doc_first_line))
        return str(test)
    def addSuccess(self, test):
        super(BaseRainbowResult, self).addSuccess(test)
        self.success += 1
        self.draw()
    def addError(self, test, err):
        super(BaseRainbowResult, self).addError(test, err)
        self.draw()
    def addFailure(self, test, err):
        super(BaseRainbowResult, self).addFailure(test, err)
        self.draw()
    def addSkip(self, test, reason):
        super(BaseRainbowResult, self).addSkip(test, reason)
        self.draw()
    def addExpectedFailure(self, test, err):
        super(BaseRainbowResult, self).addExpectedFailure(test, err)
        self.draw()
    def addUnexpectedSuccess(self, test):
        super(BaseRainbowResult, self).addUnexpectedSuccess(test)
        self.draw()
    def printErrors(self):
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)
    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln('{0}: {1}'.format(flavour, self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln(err)
    def startTestRun(self):
        self.cursor_hide()
    def stopTestRun(self):
        # Move past the drawing area and restore the cursor.
        for i in range(self.number_of_lines):
            self.stream.write('\n')
        self.cursor_show()
    def draw(self):
        # One animation frame; cursor_up afterwards so the next frame
        # overwrites this one in place.
        self.append_rainbow()
        draw_methods = (
            self.draw_scoreboard,
            self.draw_rainbow,
            self.draw_pet,
        )
        for method in draw_methods:
            method()
        self.cursor_up()
        self.tick = not self.tick
    def append_rainbow(self):
        # Alternate '_'/'-' segments give the trail its wave shape.
        segment = '_' if self.tick else '-'
        rainbowified = self.rainbowify(segment)
        for index in range(self.number_of_lines):
            trajectory = self.trajectories[index]
            # Drop the oldest segment once the trail fills its width.
            if len(trajectory) >= self.trajectory_width_max:
                trajectory.pop(0)
            trajectory.append(rainbowified)
    def draw_scoreboard(self):
        self.draw_score(self.success, Colors.SUCCESS)
        self.draw_score(len(self.errors), Colors.ERROR)
        self.draw_score(len(self.failures), Colors.FAIL)
        self.stream.writeln()
    def draw_score(self, score, color):
        self.stream.write(' ')
        self.stream.writeln('\033[{0}m{1}\033[0m'.format(color, score))
    def draw_rainbow(self):
        for trajectory in self.trajectories:
            # Cursor-forward past the scoreboard column.
            self.stream.write('\033[{0}C'.format(self.scoreboard_width))
            self.stream.writeln(''.join(trajectory))
    def draw_pet(self):
        # Subclasses draw their mascot here.
        raise NotImplementedError
    def rainbowify(self, string):
        # Wrap the segment in the next 256-color foreground escape.
        color = self.rainbow_colors[self.color_index % len(self.rainbow_colors)]
        self.color_index += 1
        return '\033[38;5;{0}m{1}\033[0m'.format(color, string)
    def generate_colors(self):
        # Build rainbow indices into the xterm 6x6x6 color cube
        # (index = 36r + 6g + b + 16) from three phase-shifted sine waves.
        pi3 = math.floor(math.pi / 3)
        n = lambda i: i * (1.0 / 6)
        r = lambda i: math.floor(3 * math.sin(n(i)) + 3)
        g = lambda i: math.floor(3 * math.sin(n(i) + 2 + pi3) + 3)
        b = lambda i: math.floor(3 * math.sin(n(i) + 4 + pi3) + 3)
        colors = [int(36 * r(i) + 6 * g(i) + b(i) + 16) for i in range(42)]
        return colors
    def cursor_up(self):
        self.stream.write('\033[{0}A'.format(self.number_of_lines))
    def cursor_hide(self):
        self.stream.write('\033[?25l')
    def cursor_show(self):
        self.stream.write('\033[?25h')
class NyanCatResult(BaseRainbowResult):
    """Draws the nyan-cat mascot at the end of the rainbow trail."""
    def draw_pet(self):
        # Cursor-forward escape positions each art line after the trail.
        # NOTE(review): the exact space counts inside the art literals are
        # significant to the drawing and alternate with self.tick to
        # animate — confirm against the rendered output before changing.
        start_width = self.scoreboard_width + len(self.trajectories[0])
        color = '\033[{0}C'.format(start_width)
        self.stream.write(color)
        self.stream.writeln('_,------,')
        self.stream.write(color)
        padding = '  ' if self.tick else '   '
        self.stream.writeln('_|{0}/\\_/\\ '.format(padding))
        self.stream.write(color)
        padding = '_' if self.tick else '__'
        tail = '~' if self.tick else '^'
        self.stream.write('{0}|{1}{2} '.format(tail, padding, self.face()))
        self.stream.writeln()
        self.stream.write(color)
        padding = ' ' if self.tick else '  '
        self.stream.writeln('{0}""  "" '.format(padding))
    def face(self):
        # Errors take precedence over failures when picking an expression.
        if self.errors:
            return '( x .x)'
        elif self.failures:
            return '( o .o)'
        else:
            return '( ^ .^)'
``` |
[
{
"content": "Here is a code file:\n```python\nfrom settings_local import SUBSCRIPTION_ID, STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY, EMAIL_USERNAME, EMAIL_PASSWORD\n\n__author__ = 'Natalie Sanders'\n\n__author__ = 'Natalie Sanders'\n\nfrom azure.servicemanagement import *\nfrom azure.storage import *\nfrom sub... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom settings_local import SUBSCRIPTION_ID, STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY, EMAIL_USERNAME, EMAIL_PASSWORD\n\n__author__ = 'Natalie Sanders'\n\n__author__ = 'Natalie Sanders'\n\nfrom azure.servicemanagement import *\nfrom azure.storage im... | ```python
from settings_local import SUBSCRIPTION_ID, STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY, EMAIL_USERNAME, EMAIL_PASSWORD
__author__ = 'Natalie Sanders'
__author__ = 'Natalie Sanders'
from azure.servicemanagement import *
from azure.storage import *
from subprocess import call
from os import chdir
import os
import socket
import zipfile
import pickle
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import encoders
global user_info
def delete_vm():
hosted_service = sms.get_hosted_service_properties(service_name=username, embed_detail=True)
if hosted_service.deployments:
deployment = sms.get_deployment_by_name(username, username)
roles = deployment.role_list
for instance in roles:
if machine_name == instance.role_name:
if len(roles) == 1:
sms.delete_deployment(service_name=username, deployment_name=username)
else:
sms.delete_role(service_name=username, deployment_name=username, role_name=machine_name)
break
def send_mail( send_from, send_to, subject, text, files=[], server="localhost", port=587, username='', password='', isTls=True):
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = send_to
msg['Date'] = formatdate(localtime = True)
msg['Subject'] = subject
msg.attach( MIMEText(text) )
for f in files:
part = MIMEBase('application', "octet-stream")
part.set_payload( open(f,"rb").read() )
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="{0}"'.format(os.path.basename(f)))
msg.attach(part)
smtp = smtplib.SMTP(server, port)
if isTls: smtp.starttls()
smtp.login(username,password)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.quit()
print "emailed\n"
def upload_results():
z = zipfile.ZipFile(user_info["sim"]+'_Results.zip', "w", zipfile.ZIP_DEFLATED)
for f in os.listdir("Input"):
chdir("c:/Users/Public/Sim/Input")
z.write(f)
chdir("c:/Users/Public/Sim/Output")
z.write("stdout.txt")
z.close()
result = 'r-' + machine_name
blob_service.put_block_blob_from_path(container_name, result, 'c:/Users/Public/Sim/Output.zip')
print "uploaded\n"
def download_input():
blob_service.get_blob_to_path(container_name, machine_name, 'c:/Users/Public/Sim/Input.zip')
chdir("C:/Users/Public/Sim")
z = zipfile.ZipFile('Input.zip', 'r')
z.extractall('Input')
z.close()
print "downloaded\n"
########################################################################################################################
## MAIN ##
########################################################################################################################
##### Service Management Object #####
machine_name = socket.gethostname()
split = machine_name.split('-')
container_name = '-'.join(split[:-1]).lower()
username = '-'.join(split[:-1])
subscription_id = SUBSCRIPTION_ID
certificate_path = 'CURRENT_USER\\my\\AzureCertificate'
call(['certutil', '-user', '-f', '-p', '1', '-importPFX', 'c:/temp/azure.pfx'])
sms = ServiceManagementService(subscription_id, certificate_path)
###### Redirect stdout to File ######
chdir('C:/Users/Public/Sim')
output = open("Output/stdout.txt", "w+")
####### Download Input Files ########
blob_service = BlobService(
account_name=STORAGE_ACCOUNT_NAME,
account_key=STORAGE_ACCOUNT_KEY)
try:
download_input()
f = "C:/Users/Public/Sim/Input/AzureUserInfo.pickle"
user_info = pickle.load(file(f))
output.write('Mock model executed correctly.')
output.close()
print "download input"
except:
output.write('Could not download input from the cloud.\n')
output.close()
try:
########### Upload Results ##########
upload_results()
########### Email Results ###########
send_mail( send_from = 'vecnet.results@gmail.com',
send_to = user_info["email"],
subject = 'The results for your ' + user_info["sim"] + ' simulation are ready!',
text = 'Hi ' + user_info['username'] + ',\n\nYour ' + user_info["sim"] + ' simulation has '
'finished running. Look for your results below.\n\nThanks for using VecNet Azure '
'resources!\nThe VecNet Team',
files = ['c:/Users/Public/Sim/' + user_info["sim"] + '_Results.zip'],
server = "smtp.gmail.com",
port = 587,
username = EMAIL_USERNAME,
password = EMAIL_PASSWORD,
isTls = True)
print "sent mail"
############# Exit Script #############
finally:
delete_vm()
``` |
[
{
"content": "```python\nfrom django.test import TestCase\nfrom django.conf import settings\nfrom restclients.exceptions import DataFailureException\nimport restclients.trumba.calendar as Calendar\nfrom restclients.trumba.exceptions import TrumbaException, CalendarNotExist, CalendarOwnByDiffAccount, NoDataRetur... | [
{
"content": "<|memory_start|>```python\nfrom django.test import TestCase\nfrom django.conf import settings\nfrom restclients.exceptions import DataFailureException\nimport restclients.trumba.calendar as Calendar\nfrom restclients.trumba.exceptions import TrumbaException, CalendarNotExist, CalendarOwnByDiffAcco... | ```python
from django.test import TestCase
from django.conf import settings
from restclients.exceptions import DataFailureException
import restclients.trumba.calendar as Calendar
from restclients.trumba.exceptions import TrumbaException, CalendarNotExist, CalendarOwnByDiffAccount, NoDataReturned, UnknownError, UnexpectedError
class TrumbaTestCalendars(TestCase):
def test_get_bot_calendars_normal_cases(self):
with self.settings(
RESTCLIENTS_TRUMBA_BOT_DAO_CLASS='restclients.dao_implementation.trumba.FileBot'
):
result = Calendar.get_bot_calendars()
self.assertTrue(result is not None and len(result) == 4)
def test_get_sea_calendars_normal_cases(self):
with self.settings(
RESTCLIENTS_TRUMBA_SEA_DAO_CLASS='restclients.dao_implementation.trumba.FileSea'
):
result = Calendar.get_sea_calendars()
self.assertIsNotNone(result)
self.assertTrue(len(result) == 10)
trumba_cal = result[1]
self.assertEqual(trumba_cal.calendarid, 1)
self.assertEqual(trumba_cal.campus, 'sea')
self.assertEqual(trumba_cal.name, 'Seattle calendar')
self.assertTrue(trumba_cal.is_sea())
self.assertFalse(trumba_cal.is_bot())
self.assertFalse(trumba_cal.is_tac())
trumba_cal = result[11321]
self.assertEqual(trumba_cal.calendarid, 11321)
self.assertEqual(trumba_cal.campus, 'sea')
self.assertEqual(trumba_cal.name,
'Seattle calendar >> Seattle child calendar3 >> Seattle child-sub-calendar32 >> Seattle child-sub-sub-calendar321')
self.assertTrue(trumba_cal.is_sea())
self.assertFalse(trumba_cal.is_bot())
self.assertFalse(trumba_cal.is_tac())
def test_get_tac_calendars_normal_cases(self):
with self.settings(
RESTCLIENTS_TRUMBA_TAC_DAO_CLASS='restclients.dao_implementation.trumba.FileTac'
):
self.assertIsNotNone(Calendar.get_tac_calendars())
self.assertTrue(len(Calendar.get_tac_calendars()) == 1)
def test_get_sea_permissions_normal_cases(self):
with self.settings(
RESTCLIENTS_TRUMBA_SEA_DAO_CLASS='restclients.dao_implementation.trumba.FileSea'
):
result = Calendar.get_sea_permissions(1)
self.assertIsNotNone(result)
self.assertTrue(len(result) == 3)
perm = result[0]
self.assertEqual(perm.calendarid, 1)
self.assertEqual(perm.campus, 'sea')
self.assertEqual(perm.name, 'Dummy publisher')
self.assertEqual(perm.uwnetid, 'dummyp')
self.assertEqual(perm.level, 'PUBLISH')
self.assertTrue(perm.is_edit())
self.assertFalse(perm.is_showon())
self.assertTrue(perm.is_sea())
self.assertFalse(perm.is_bot())
self.assertFalse(perm.is_tac())
perm = result[1]
self.assertEqual(perm.calendarid, 1)
self.assertEqual(perm.campus, 'sea')
self.assertEqual(perm.name, 'Dummy editor')
self.assertEqual(perm.uwnetid, 'dummye')
self.assertEqual(perm.level, 'EDIT')
self.assertTrue(perm.is_edit())
self.assertFalse(perm.is_showon())
self.assertTrue(perm.is_sea())
self.assertFalse(perm.is_bot())
self.assertFalse(perm.is_tac())
perm = result[2]
self.assertEqual(perm.calendarid, 1)
self.assertEqual(perm.campus, 'sea')
self.assertEqual(perm.name, 'Dummy showon')
self.assertEqual(perm.uwnetid, 'dummys')
self.assertEqual(perm.level, 'SHOWON')
self.assertFalse(perm.is_edit())
self.assertTrue(perm.is_showon())
self.assertTrue(perm.is_sea())
self.assertFalse(perm.is_bot())
self.assertFalse(perm.is_tac())
def test_get_sea_permissions_error_cases(self):
with self.settings(
RESTCLIENTS_TRUMBA_SEA_DAO_CLASS='restclients.dao_implementation.trumba.FileSea'
):
self.assertRaises(CalendarNotExist,
Calendar.get_sea_permissions, 0)
self.assertRaises(CalendarOwnByDiffAccount,
Calendar.get_sea_permissions, 2)
def test_create_body(self):
self.assertEqual(Calendar._create_get_perm_body(1), '{"CalendarID": 1}')
def test_is_valid_calendarid(self):
self.assertTrue(Calendar._is_valid_calendarid(1))
self.assertFalse(Calendar._is_valid_calendarid(0))
self.assertFalse(Calendar._is_valid_calendarid(-1))
def test_is_valid_email(self):
self.assertTrue(Calendar._is_valid_email('test@washington.edu'))
self.assertFalse(Calendar._is_valid_email('test-email@washington.edu'))
self.assertFalse(Calendar._is_valid_email('test_email@washington.edu'))
self.assertFalse(Calendar._is_valid_email('test.email@washington.edu'))
self.assertFalse(Calendar._is_valid_email('test@uw.edu'))
self.assertFalse(Calendar._is_valid_email('0test@washington.edu'))
self.assertFalse(Calendar._is_valid_email(''))
def test_extract_uwnetid(self):
self.assertEqual(Calendar._extract_uwnetid('test@washington.edu'), 'test')
self.assertEqual(Calendar._extract_uwnetid('test'), 'test')
self.assertEqual(Calendar._extract_uwnetid('@washington.edu'), '')
self.assertEqual(Calendar._extract_uwnetid('bad@uw.edu'), 'bad@uw.edu')
self.assertEqual(Calendar._extract_uwnetid(''), '')
def test_check_err(self):
self.assertRaises(UnexpectedError,
Calendar._check_err,
{"d":{"Messages":[{"Code":3009,
"Description":"..."}]}})
self.assertRaises(CalendarOwnByDiffAccount,
Calendar._check_err,
{"d":{"Messages":[{"Code":3007}]}})
self.assertRaises(CalendarNotExist,
Calendar._check_err,
{"d":{"Messages":[{"Code":3006}]}})
self.assertRaises(NoDataReturned,
Calendar._check_err, {'d': None})
self.assertRaises(UnknownError,
Calendar._check_err,
{"d":{"Messages":[]}})
self.assertRaises(UnknownError,
Calendar._check_err,
{"d":{"Messages":[{"Code": None}]}})
self.assertIsNone(Calendar._check_err({"d":{"Messages":None}}))
def test_process_get_cal_resp(self):
"""
Omit for now . To be implemented using httmock
"""
pass
def test_process_get_perm_resp(self):
"""
Omit for now . To be implemented using httmock
"""
pass
``` |
[
{
"content": "Repeat the full code snippet:\n```python\nimport os.path\n\nfrom PyQt5.QtCore import QObject, QStandardPaths, QDir, pyqtSlot, pyqtProperty\n\nfrom .res import getRes\n\n\nclass Resources(QObject):\n\n appDataDir = QStandardPaths.writableLocation(QStandardPaths.AppDataLocation)\n\n soundFileE... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\nimport os.path\n\nfrom PyQt5.QtCore import QObject, QStandardPaths, QDir, pyqtSlot, pyqtProperty\n\nfrom .res import getRes\n\n\nclass Resources(QObject):\n\n appDataDir = QStandardPaths.writableLocation(QStandardPaths.AppDataLocation)\n... | ```python
import os.path
from PyQt5.QtCore import QObject, QStandardPaths, QDir, pyqtSlot, pyqtProperty
from .res import getRes
class Resources(QObject):
appDataDir = QStandardPaths.writableLocation(QStandardPaths.AppDataLocation)
soundFileExt = '.wav'
iconFileExt = '.svg'
homeAppDir = os.path.join(QStandardPaths.standardLocations(QStandardPaths.HomeLocation)[0], '.pisak')
@pyqtSlot(str, result=str)
def getSoundPath(self, soundName):
soundFile = soundName + self.soundFileExt
path = os.path.join(self.appDataDir, soundFile)
if not os.path.exists(path):
path = getRes(os.path.join('sounds', soundFile))
return path
@pyqtSlot(str, result=str)
def getIconPath(self, iconName):
iconFile = iconName + self.iconFileExt
return getRes(os.path.join('icons', iconFile))
@pyqtSlot(str, result=str)
def getResource(self, item):
return getRes(item)
@pyqtProperty(str, constant=True)
def symbolsFolder(self):
return getRes('symbols')
@pyqtProperty(str, constant=True)
def moviesDir(self):
return QStandardPaths.standardLocations(QStandardPaths.MoviesLocation)[0]
@pyqtProperty(str, constant=True)
def musicDir(self):
return QStandardPaths.standardLocations(QStandardPaths.MusicLocation)[0]
@pyqtProperty(str, constant=True)
def photosDir(self):
return QStandardPaths.standardLocations(QStandardPaths.PicturesLocation)[0]
``` |
[
{
"content": "Repeat the code precisely:\n```python\n# coding: utf-8\n\n\nclass Solution:\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if s == \"\" or s[0] == '0':\n return 0\n\n # Initialization\n before_previous = ... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n# coding: utf-8\n\n\nclass Solution:\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if s == \"\" or s[0] == '0':\n return 0\n\n # Initialization\n be... | ```python
# coding: utf-8
class Solution:
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if s == "" or s[0] == '0':
return 0
# Initialization
before_previous = 0
previous_one = 0
current = 1
for i in range(1, len(s)):
before_previous = previous_one
previous_one = current
# When current charactor is '0', if previous charactor is in ['1', '2'],
# f(i) = f(i-2), otherwise result is 0
if s[i] == '0':
if s[i-1] not in ['1', '2']:
return 0
# In particular, when i = 2 or before_previous == 0, current = 1
current = before_previous if before_previous else 1
continue
# f(i) = f(i - 1)
current = previous_one
if s[i-1] != '0' and int(s[i-1] + s[i]) <= 26:
# f(i) = f(i - 1) + f(i - 2)
current += before_previous if before_previous else 1
return current
``` |
[
{
"content": "Repeat the following code:\n```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\" This module contains an implementation of the 'b64' transport. \"\"\"\n\nfrom obfsproxy.transports.base import BaseTransport\n\nimport base64\n\nimport obfsproxy.common.log as logging\n\nlog = logging.get_o... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\" This module contains an implementation of the 'b64' transport. \"\"\"\n\nfrom obfsproxy.transports.base import BaseTransport\n\nimport base64\n\nimport obfsproxy.common.log as logging\n\nlog... | ```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" This module contains an implementation of the 'b64' transport. """
from obfsproxy.transports.base import BaseTransport
import base64
import obfsproxy.common.log as logging
log = logging.get_obfslogger()
def _get_b64_chunks_from_str(string):
"""
Given a 'string' of concatenated base64 objects, return a list
with the objects.
Assumes that the objects are well-formed base64 strings. Also
assumes that the padding character of base64 is '='.
"""
chunks = []
while True:
pad_loc = string.find('=')
if pad_loc < 0 or pad_loc == len(string)-1 or pad_loc == len(string)-2:
# If there is no padding, or it's the last chunk: append
# it to chunks and return.
chunks.append(string)
return chunks
if pad_loc != len(string)-1 and string[pad_loc+1] == '=': # double padding
pad_loc += 1
# Append the object to the chunks, and prepare the string for
# the next iteration.
chunks.append(string[:pad_loc+1])
string = string[pad_loc+1:]
return chunks
class B64Transport(BaseTransport):
"""
Implements the b64 protocol. A protocol that encodes data with
base64 before pushing them to the network.
"""
def __init__(self, transport_config):
super(B64Transport, self).__init__()
def receivedDownstream(self, data):
"""
Got data from downstream; relay them upstream.
"""
decoded_data = ''
# TCP is a stream protocol: the data we received might contain
# more than one b64 chunk. We should inspect the data and
# split it into multiple chunks.
b64_chunks = _get_b64_chunks_from_str(data.peek())
# Now b64 decode each chunk and append it to the our decoded
# data.
for chunk in b64_chunks:
try:
decoded_data += base64.b64decode(chunk)
except TypeError:
log.info("We got corrupted b64 ('%s')." % chunk)
return
data.drain()
self.circuit.upstream.write(decoded_data)
def receivedUpstream(self, data):
"""
Got data from upstream; relay them downstream.
"""
self.circuit.downstream.write(base64.b64encode(data.read()))
return
class B64Client(B64Transport):
pass
class B64Server(B64Transport):
pass
``` |
[
{
"content": "Here is the script:\n```python\nimport argparse\nimport os\nimport struct\n\nimport tqdm\n\nfrom program_synthesis.karel.tools import indexed_file\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', required=True)\n parser.add_argument('--... | [
{
"content": "Here is the script:\n<|memory_start|>```python\nimport argparse\nimport os\nimport struct\n\nimport tqdm\n\nfrom program_synthesis.karel.tools import indexed_file\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', required=True)\n parser.... | ```python
import argparse
import os
import struct
import tqdm
from program_synthesis.karel.tools import indexed_file
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True)
parser.add_argument('--num-pieces', required=True, type=int)
args = parser.parse_args()
root, ext = os.path.splitext(args.input)
index = []
with open(args.input + '.index') as index_file:
while True:
offset = index_file.read(8)
if not offset:
break
offset, = struct.unpack('<Q', offset)
index.append(offset)
num_elems = len(index)
pieces_num_elems = [num_elems // args.num_pieces] * args.num_pieces
pieces_num_elems[0] += num_elems - sum(pieces_num_elems)
index.append(os.stat(args.input).st_size)
input_file = open(args.input)
index_offset = 0
for i, piece_num_elems in enumerate(tqdm.tqdm(pieces_num_elems)):
piece_name = '{}-{:03d}-of-{:03d}{}'.format(
root, i, args.num_pieces, ext)
piece_start = index[index_offset]
piece_end = index[index_offset + piece_num_elems]
piece_size = piece_end - piece_start
input_file.seek(piece_start)
with open(piece_name, 'w') as output_file:
total_written = 0
while total_written < piece_size:
chunk = input_file.read(
min(1024768, piece_size - total_written))
assert chunk, 'EOF reached unexpectedly'
output_file.write(chunk)
total_written += len(chunk)
piece_index = [
v - piece_start
for v in index[index_offset:index_offset + piece_num_elems]
]
with open(piece_name + '.index', 'w') as index_file:
for v in piece_index:
index_file.write(struct.pack('<Q', v))
index_offset += piece_num_elems
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n#!/usr/bin/python\n#\n# Copyright (c) 2014-2015 Sylvain Peyrefitte\n#\n# This file is part of rdpy.\n#\n# rdpy is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as publi... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n#!/usr/bin/python\n#\n# Copyright (c) 2014-2015 Sylvain Peyrefitte\n#\n# This file is part of rdpy.\n#\n# rdpy is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public ... | ```python
#!/usr/bin/python
#
# Copyright (c) 2014-2015 Sylvain Peyrefitte
#
# This file is part of rdpy.
#
# rdpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
RDP proxy with Man in the middle capabilities
Save RDP events in output RSR file format
RSR file format can be read by rdpy-rsrplayer.py
----------------------------
Client RDP -> | ProxyServer | ProxyClient | -> Server RDP
----------------------------
| Record Session |
-----------------
"""
import sys
import os
import argparse
import time
from rdpy.core import log, error, rss
from rdpy.protocol.rdp import rdp
from twisted.internet import reactor
log._LOG_LEVEL = log.Level.INFO
class ProxyServer(rdp.RDPServerObserver):
"""
@summary: Server side of proxy
"""
def __init__(self, controller, target, clientSecurityLevel, rssRecorder):
"""
@param controller: {RDPServerController}
@param target: {tuple(ip, port)}
@param rssRecorder: {rss.FileRecorder} use to record session
"""
rdp.RDPServerObserver.__init__(self, controller)
self._target = target
self._client = None
self._rss = rssRecorder
self._clientSecurityLevel = clientSecurityLevel
def setClient(self, client):
"""
@summary: Event throw by client when it's ready
@param client: {ProxyClient}
"""
self._client = client
def onReady(self):
"""
@summary: Event use to inform state of server stack
First time this event is called is when human client is connected
Second time is after color depth nego, because color depth nego
restart a connection sequence
@see: rdp.RDPServerObserver.onReady
"""
if self._client is None:
# try a connection
domain, username, password = self._controller.getCredentials()
self._rss.credentials(username, password,
domain, self._controller.getHostname())
width, height = self._controller.getScreen()
self._rss.screen(width, height, self._controller.getColorDepth())
reactor.connectTCP(self._target[0], int(self._target[1]), ProxyClientFactory(self, width, height,
domain, username, password, self._clientSecurityLevel))
def onClose(self):
"""
@summary: Call when human client close connection
@see: rdp.RDPServerObserver.onClose
"""
# end scenario
self._rss.close()
# close network stack
if self._client is None:
return
self._client._controller.close()
def onKeyEventScancode(self, code, isPressed, isExtended):
"""
@summary: Event call when a keyboard event is catch in scan code format
@param code: {integer} scan code of key
@param isPressed: {boolean} True if key is down
@param isExtended: {boolean} True if a special key
@see: rdp.RDPServerObserver.onKeyEventScancode
"""
if self._client is None:
return
self._client._controller.sendKeyEventScancode(
code, isPressed, isExtended)
self._rss.keyScancode(code, isPressed)
def onKeyEventUnicode(self, code, isPressed):
"""
@summary: Event call when a keyboard event is catch in unicode format
@param code: unicode of key
@param isPressed: True if key is down
@see: rdp.RDPServerObserver.onKeyEventUnicode
"""
if self._client is None:
return
self._client._controller.sendKeyEventUnicode(code, isPressed)
self._rss.keyUnicode(code, isPressed)
def onPointerEvent(self, x, y, button, isPressed):
"""
@summary: Event call on mouse event
@param x: {int} x position
@param y: {int} y position
@param button: {int} 1, 2, 3, 4 or 5 button
@param isPressed: {bool} True if mouse button is pressed
@see: rdp.RDPServerObserver.onPointerEvent
"""
if self._client is None:
return
self._client._controller.sendPointerEvent(x, y, button, isPressed)
class ProxyServerFactory(rdp.ServerFactory):
"""
@summary: Factory on listening events
"""
def __init__(self, target, ouputDir, privateKeyFilePath, certificateFilePath, clientSecurity):
"""
@param target: {tuple(ip, prt)}
@param privateKeyFilePath: {str} file contain server private key (if none -> back to standard RDP security)
@param certificateFilePath: {str} file contain server certificate (if none -> back to standard RDP security)
@param clientSecurity: {str(ssl|rdp)} security layer use in client connection side
"""
rdp.ServerFactory.__init__(
self, 16, privateKeyFilePath, certificateFilePath)
self._target = target
self._ouputDir = ouputDir
self._clientSecurity = clientSecurity
# use produce unique file by connection
self._uniqueId = 0
def buildObserver(self, controller, addr):
"""
@param controller: {rdp.RDPServerController}
@param addr: destination address
@see: rdp.ServerFactory.buildObserver
"""
self._uniqueId += 1
return ProxyServer(controller, self._target, self._clientSecurity, rss.createRecorder(os.path.join(self._ouputDir, "%s_%s_%s.rss" % (time.strftime('%Y%m%d%H%M%S'), addr.host, self._uniqueId))))
class ProxyClient(rdp.RDPClientObserver):
"""
@summary: Client side of proxy
"""
def __init__(self, controller, server):
"""
@param controller: {rdp.RDPClientController}
@param server: {ProxyServer}
"""
rdp.RDPClientObserver.__init__(self, controller)
self._server = server
def onReady(self):
"""
@summary: Event use to signal that RDP stack is ready
Inform ProxyServer that i'm connected
@see: rdp.RDPClientObserver.onReady
"""
self._server.setClient(self)
# maybe color depth change
self._server._controller.setColorDepth(
self._controller.getColorDepth())
def onSessionReady(self):
"""
@summary: Windows session is ready
@see: rdp.RDPClientObserver.onSessionReady
"""
pass
def onClose(self):
"""
@summary: Event inform that stack is close
@see: rdp.RDPClientObserver.onClose
"""
# end scenario
self._server._rss.close()
self._server._controller.close()
def onUpdate(self, destLeft, destTop, destRight, destBottom, width, height, bitsPerPixel, isCompress, data):
"""
@summary: Event use to inform bitmap update
@param destLeft: {int} xmin position
@param destTop: {int} ymin position
@param destRight: {int} xmax position because RDP can send bitmap with padding
@param destBottom: {int} ymax position because RDP can send bitmap with padding
@param width: {int} width of bitmap
@param height: {int} height of bitmap
@param bitsPerPixel: {int} number of bit per pixel
@param isCompress: {bool} use RLE compression
@param data: {str} bitmap data
@see: rdp.RDPClientObserver.onUpdate
"""
self._server._rss.update(destLeft, destTop, destRight, destBottom, width, height,
bitsPerPixel, rss.UpdateFormat.BMP if isCompress else rss.UpdateFormat.RAW, data)
self._server._controller.sendUpdate(
destLeft, destTop, destRight, destBottom, width, height, bitsPerPixel, isCompress, data)
class ProxyClientFactory(rdp.ClientFactory):
"""
@summary: Factory for proxy client
"""
def __init__(self, server, width, height, domain, username, password, security):
"""
@param server: {ProxyServer}
@param width: {int} screen width
@param height: {int} screen height
@param domain: {str} domain session
@param username: {str} username session
@param password: {str} password session
@param security: {str(ssl|rdp)} security level
"""
self._server = server
self._width = width
self._height = height
self._domain = domain
self._username = username
self._password = password
self._security = security
def buildObserver(self, controller, addr):
"""
@summary: Build observer
@param controller: rdp.RDPClientController
@param addr: destination address
@see: rdp.ClientFactory.buildObserver
@return: ProxyClient
"""
# set screen resolution
controller.setScreen(self._width, self._height)
# set credential
controller.setDomain(self._domain)
controller.setUsername(self._username)
controller.setPassword(self._password)
controller.setSecurityLevel(self._security)
controller.setPerformanceSession()
return ProxyClient(controller, self._server)
def parseIpPort(interface, defaultPort="3389"):
if ':' in interface:
s = interface.split(':')
return s[0], int(s[1])
else:
return interface, int(defaultPort)
def isDirectory(outputDirectory):
if outputDirectory is None or not os.path.dirname(outputDirectory):
log.error("{} is an invalid output directory or directory doesn't exist".format(
outputDirectory))
return outputDirectory
def mapSecurityLayer(layer):
return {
"rdp": rdp.SecurityLevel.RDP_LEVEL_RDP,
"tls": rdp.SecurityLevel.RDP_LEVEL_SSL,
"nla": rdp.SecurityLevel.RDP_LEVEL_NLA
}[layer]
if __name__ == '__main__':
p = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('-l', '--listen', type=parseIpPort, default="0.0.0.0:3389",
help="<addr>[:<port>] to bind the server")
p.add_argument('-t', '--target', type=parseIpPort, required=True,
help="<addr>[:<port>] of the target you want to connect to via proxy")
p.add_argument('-o', '--output', type=isDirectory,
help="output directory", required=True)
p.add_argument('-s', '--sec', choices=["rdp", "tls", "nla"],
default="rdp", help="set protocol security layer")
ssl = p.add_argument_group()
ssl.add_argument('-c', '--certificate', help="certificate for TLS connections")
ssl.add_argument('-k', '--key', help="private key of the given certificate for TLS connections")
args = p.parse_args()
if args.certificate and args.key and not args.sec == "nla":
args.sec = "tls"
log.info("running server on {addr}, using {sec} security layer, proxying to {target}".format(
addr=args.listen, sec=args.sec.upper(), target=args.target))
reactor.listenTCP(args.listen[1], ProxyServerFactory(
args.target, args.output, args.key, args.certificate, mapSecurityLayer(args.sec)),
interface=args.listen[0])
reactor.run()
``` |
[
{
"content": "Here is the snippet:\n```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field '... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n ... | ```python
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SonoSensor.muscle'
db.add_column(u'feed_sonosensor', 'muscle',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feed.MuscleOwl'], null=True),
keep_default=False)
# Adding field 'EmgSensor.muscle'
db.add_column(u'feed_emgsensor', 'muscle',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feed.MuscleOwl'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SonoSensor.muscle'
db.delete_column(u'feed_sonosensor', 'muscle_id')
# Deleting field 'EmgSensor.muscle'
db.delete_column(u'feed_emgsensor', 'muscle_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feed.ageunit': {
'Meta': {'ordering': "['label']", 'object_name': 'AgeUnit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ageunit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anatomicallocation': {
'Meta': {'ordering': "['label']", 'object_name': 'AnatomicalLocation'},
'category': ('django.db.models.fields.IntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anatomicallocation_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anteriorposterioraxis': {
'Meta': {'object_name': 'AnteriorPosteriorAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anteriorposterioraxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behavior': {
'Meta': {'ordering': "['label']", 'object_name': 'Behavior'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'behavior_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behaviorowl': {
'Meta': {'object_name': 'BehaviorOwl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.BehaviorOwl']", 'symmetrical': 'False'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.channel': {
'Meta': {'object_name': 'Channel'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channel_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rate': ('django.db.models.fields.IntegerField', [], {}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.channellineup': {
'Meta': {'ordering': "['position']", 'object_name': 'ChannelLineup'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Channel']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channellineup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.depthaxis': {
'Meta': {'object_name': 'DepthAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'depthaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.developmentstage': {
'Meta': {'ordering': "['label']", 'object_name': 'DevelopmentStage'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'developmentstage_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.dorsalventralaxis': {
'Meta': {'object_name': 'DorsalVentralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dorsalventralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.electrodetype': {
'Meta': {'ordering': "['label']", 'object_name': 'ElectrodeType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'electrodetype_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgchannel': {
'Meta': {'object_name': 'EmgChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'emg_amplification': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'emg_filtering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Emgfiltering']"}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.EmgSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.emgfiltering': {
'Meta': {'ordering': "['label']", 'object_name': 'Emgfiltering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'emgfiltering_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgsensor': {
'Meta': {'ordering': "['id']", 'object_name': 'EmgSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'electrode_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ElectrodeType']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.emgsetup': {
'Meta': {'object_name': 'EmgSetup', '_ormbases': [u'feed.Setup']},
'preamplifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.eventchannel': {
'Meta': {'object_name': 'EventChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.eventsetup': {
'Meta': {'object_name': 'EventSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.experiment': {
'Meta': {'object_name': 'Experiment'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impl_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'subj_age': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subj_ageunit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AgeUnit']", 'null': 'True', 'blank': 'True'}),
'subj_devstage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DevelopmentStage']"}),
'subj_tooth': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']"}),
'subject_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.forcechannel': {
'Meta': {'object_name': 'ForceChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ForceSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.forcesensor': {
'Meta': {'object_name': 'ForceSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.forcesetup': {
'Meta': {'object_name': 'ForceSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.illustration': {
'Meta': {'object_name': 'Illustration'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'illustration_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']", 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.kinematicschannel': {
'Meta': {'object_name': 'KinematicsChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.KinematicsSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.kinematicssensor': {
'Meta': {'object_name': 'KinematicsSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.kinematicssetup': {
'Meta': {'object_name': 'KinematicsSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.mediallateralaxis': {
'Meta': {'object_name': 'MedialLateralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'mediallateralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.muscleowl': {
'Meta': {'object_name': 'MuscleOwl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.MuscleOwl']", 'symmetrical': 'False'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.pressurechannel': {
'Meta': {'object_name': 'PressureChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.PressureSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.pressuresensor': {
'Meta': {'object_name': 'PressureSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.pressuresetup': {
'Meta': {'object_name': 'PressureSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.proximaldistalaxis': {
'Meta': {'object_name': 'ProximalDistalAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proximaldistalaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.restraint': {
'Meta': {'ordering': "['label']", 'object_name': 'Restraint'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'restraint_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sensor': {
'Meta': {'object_name': 'Sensor'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sensor_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loc_ap': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnteriorPosteriorAxis']", 'null': 'True', 'blank': 'True'}),
'loc_dv': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DorsalVentralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_ml': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MedialLateralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_pd': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ProximalDistalAxis']", 'null': 'True', 'blank': 'True'}),
'loc_side': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Side']"}),
'location_freetext': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.session': {
'Meta': {'ordering': "['position']", 'object_name': 'Session'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.Channel']", 'through': u"orm['feed.ChannelLineup']", 'symmetrical': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_anesthesia_sedation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_restraint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Restraint']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.setup': {
'Meta': {'object_name': 'Setup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'setup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sampling_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.side': {
'Meta': {'ordering': "['label']", 'object_name': 'Side'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'side_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sonochannel': {
'Meta': {'object_name': 'SonoChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'crystal1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals1_related'", 'to': u"orm['feed.SonoSensor']"}),
'crystal2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals2_related'", 'to': u"orm['feed.SonoSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.sonosensor': {
'Meta': {'object_name': 'SonoSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.sonosetup': {
'Meta': {'object_name': 'SonoSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'}),
'sonomicrometer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.strainchannel': {
'Meta': {'object_name': 'StrainChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.StrainSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.strainsensor': {
'Meta': {'object_name': 'StrainSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.strainsetup': {
'Meta': {'object_name': 'StrainSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.study': {
'Meta': {'ordering': "['title']", 'object_name': 'Study'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'approval_secured': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'study_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'funding_agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.studyprivate': {
'Meta': {'object_name': 'StudyPrivate'},
'approval': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'studyprivate_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'funding': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lab': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pi': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.subject': {
'Meta': {'object_name': 'Subject'},
'breed': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subject_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'taxon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Taxon']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.taxon': {
'Meta': {'ordering': "['genus']", 'object_name': 'Taxon'},
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'taxon_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.trial': {
'Meta': {'object_name': 'Trial'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'behavior_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'behavior_primary': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Behavior']"}),
'behavior_secondary': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trial_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'estimated_duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'food_property': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_treatment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {}),
'waveform_picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'feed.unit': {
'Meta': {'ordering': "['technique', 'label']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'unit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['feed']
``` |
[
{
"content": "```python\n#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the fi... | [
{
"content": "<|memory_start|>```python\n#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full lice... | ```python
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
`distutils`-specific post-import hook.
This hook freezes the external `Makefile` and `pyconfig.h` files bundled with
the active Python interpreter, which the `distutils.sysconfig` module parses at
runtime for platform-specific metadata.
"""
# TODO Verify that bundling Makefile and pyconfig.h is still required for Python 3.
import os
import sysconfig
from PyInstaller.utils.hooks import relpath_to_config_or_make
_CONFIG_H = sysconfig.get_config_h_filename()
if hasattr(sysconfig, 'get_makefile_filename'):
# sysconfig.get_makefile_filename is missing in Python < 2.7.9
_MAKEFILE = sysconfig.get_makefile_filename()
else:
_MAKEFILE = sysconfig._get_makefile_filename()
# Data files in PyInstaller hook format.
datas = [(_CONFIG_H, relpath_to_config_or_make(_CONFIG_H))]
# The Makefile does not exist on all platforms, eg. on Windows
if os.path.exists(_MAKEFILE):
datas.append((_MAKEFILE, relpath_to_config_or_make(_MAKEFILE)))
``` |
[
{
"content": "Repeat the full code snippet:\n```python\nfrom __future__ import absolute_import\n# Copyright (c) 2010-2018 openpyxl\n\nimport re\n\nfrom openpyxl.compat import unicode, long\n\nfrom openpyxl.cell import Cell\nfrom openpyxl.utils import get_column_letter\nfrom openpyxl.utils.datetime import from_e... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\nfrom __future__ import absolute_import\n# Copyright (c) 2010-2018 openpyxl\n\nimport re\n\nfrom openpyxl.compat import unicode, long\n\nfrom openpyxl.cell import Cell\nfrom openpyxl.utils import get_column_letter\nfrom openpyxl.utils.dateti... | ```python
from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
import re
from openpyxl.compat import unicode, long
from openpyxl.cell import Cell
from openpyxl.utils import get_column_letter
from openpyxl.utils.datetime import from_excel
from openpyxl.styles import is_date_format
from openpyxl.styles.numbers import BUILTIN_FORMATS
FLOAT_REGEX = re.compile(r"\.|[E-e]")
def _cast_number(value):
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return long(value)
class ReadOnlyCell(object):
__slots__ = ('parent', 'row', 'column', '_value', 'data_type', '_style_id')
def __init__(self, sheet, row, column, value, data_type='n', style_id=0):
self.parent = sheet
self._value = None
self.row = row
self.column = column
self.data_type = data_type
self.value = value
self._style_id = style_id
def __eq__(self, other):
for a in self.__slots__:
if getattr(self, a) != getattr(other, a):
return
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<ReadOnlyCell {0!r}.{1}>".format(self.parent.title, self.coordinate)
@property
def shared_strings(self):
return self.parent.shared_strings
@property
def base_date(self):
return self.parent.base_date
@property
def coordinate(self):
column = get_column_letter(self.column)
return "{1}{0}".format(self.row, column)
@property
def style_array(self):
return self.parent.parent._cell_styles[self._style_id]
@property
def number_format(self):
_id = self.style_array.numFmtId
if _id < 164:
return BUILTIN_FORMATS.get(_id, "General")
else:
return self.parent.parent._number_formats[_id - 164]
@property
def font(self):
_id = self.style_array.fontId
return self.parent.parent._fonts[_id]
@property
def fill(self):
_id = self.style_array.fillId
return self.parent.parent._fills[_id]
@property
def border(self):
_id = self.style_array.borderId
return self.parent.parent._borders[_id]
@property
def alignment(self):
_id = self.style_array.alignmentId
return self.parent.parent._alignments[_id]
@property
def protection(self):
_id = self.style_array.protectionId
return self.parent.parent._protections[_id]
@property
def is_date(self):
return self.data_type == 'n' and is_date_format(self.number_format)
@property
def internal_value(self):
return self._value
@property
def value(self):
if self._value is None:
return
if self.data_type == 'n':
if self.style_array:
if is_date_format(self.number_format):
return from_excel(self._value, self.base_date)
return self._value
if self.data_type == 'b':
return self._value == '1'
elif self.data_type in(Cell.TYPE_INLINE, Cell.TYPE_FORMULA_CACHE_STRING):
return unicode(self._value)
elif self.data_type == 's':
return unicode(self.shared_strings[int(self._value)])
return self._value
@value.setter
def value(self, value):
if self._value is not None:
raise AttributeError("Cell is read only")
if value is None:
self.data_type = 'n'
elif self.data_type == 'n':
value = _cast_number(value)
self._value = value
class EmptyCell(object):
__slots__ = ()
value = None
is_date = False
font = None
border = None
fill = None
number_format = None
alignment = None
data_type = 'n'
def __repr__(self):
return "<EmptyCell>"
EMPTY_CELL = EmptyCell()
``` |
[
{
"content": "```python\n#Copyright (C) 2017 Paolo Galeone <nessuno@nerdz.eu>\n#\n#This Source Code Form is subject to the terms of the Mozilla Public\n#License, v. 2.0. If a copy of the MPL was not distributed with this\n#file, you can obtain one at http://mozilla.org/MPL/2.0/.\n#Exhibit B is not attached; thi... | [
{
"content": "<|memory_start|>```python\n#Copyright (C) 2017 Paolo Galeone <nessuno@nerdz.eu>\n#\n#This Source Code Form is subject to the terms of the Mozilla Public\n#License, v. 2.0. If a copy of the MPL was not distributed with this\n#file, you can obtain one at http://mozilla.org/MPL/2.0/.\n#Exhibit B is n... | ```python
#Copyright (C) 2017 Paolo Galeone <nessuno@nerdz.eu>
#
#This Source Code Form is subject to the terms of the Mozilla Public
#License, v. 2.0. If a copy of the MPL was not distributed with this
#file, you can obtain one at http://mozilla.org/MPL/2.0/.
#Exhibit B is not attached; this software is compatible with the
#licenses expressed under Section 1.12 of the MPL v2.
"""PASCAL VOC 2012"""
import os
import sys
import tarfile
import xml.etree.ElementTree as etree
import csv
from collections import defaultdict
from six.moves import urllib
import tensorflow as tf
from ..processing import build_batch
from ..images import read_image_jpg
from ..interfaces.Input import Input
from ..interfaces.InputType import InputType
class PASCALVOC2012Classification(Input):
"""Routine for decoding the PASCAL VOC 2012 binary file format."""
def __init__(self, add_input_to_label=False):
# Global constants describing the PASCAL VOC 2012 data set.
# resize image to a fixed size
# the resize dimension is an hyperparameter
self._name = 'PASCAL-VOC-2012-Classification'
self._image_height = 150
self._image_width = 150
self._image_depth = 3
# multiple boxes enable the return of a tensor
# of boxes instead of a single box per image
self._multiple_bboxes = False
self.CLASSES = [
"aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car",
"cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
"person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]
self._bboxes = {"train": defaultdict(list), "val": defaultdict(list)}
self._tf_bboxes = {"train": None, "val": None}
self._num_classes = 20
self._num_examples_per_epoch_for_train = 13609
self._num_examples_per_epoch_for_eval = 13841
self._num_examples_per_epoch_for_test = self._num_examples_per_epoch_for_eval
self._data_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data', 'PASCALVOC2012')
self._data_url = 'http://pjreddie.com/media/files/VOCtrainval_11-May-2012.tar'
self._maybe_download_and_extract()
self._add_input_to_label = add_input_to_label
@property
def name(self):
"""Returns the name of the input source"""
return self._name
def num_examples(self, input_type):
"""Returns the number of examples per the specified input_type
Args:
input_type: InputType enum
"""
InputType.check(input_type)
if input_type == InputType.train:
return self._num_examples_per_epoch_for_train
elif input_type == InputType.test:
return self._num_examples_per_epoch_for_test
return self._num_examples_per_epoch_for_eval
@property
def num_classes(self):
"""Returns the number of classes"""
return self._num_classes
def _read_image_and_box(self, bboxes_csv):
"""Extract the filename from the queue, read the image and
produce a single box
Returns:
image, box
"""
reader = tf.TextLineReader(skip_header_lines=True)
_, row = reader.read(bboxes_csv)
# file ,y_min, x_min, y_max, x_max, label
record_defaults = [[""], [0.], [0.], [0.], [0.], [0.]]
# eg:
# 2008_000033,0.1831831831831832,0.208,0.7717717717717718,0.952,0
filename, y_min, x_min, y_max, x_max, label = tf.decode_csv(
row, record_defaults)
image_path = os.path.join(self._data_dir, 'VOCdevkit', 'VOC2012',
'JPEGImages') + "/" + filename + ".jpg"
# image is normalized in [-1,1], convert to #_image_depth depth
image = read_image_jpg(image_path, depth=self._image_depth)
return image, tf.stack([y_min, x_min, y_max, x_max, label])
def _read(self, filename_queue):
image, bbox_and_label = self._read_image_and_box(
filename_queue) #bbox is a single box
bbox = bbox_and_label[:4]
label = tf.cast(bbox_and_label[-1], tf.int32)
image = tf.squeeze(
tf.image.crop_and_resize(
tf.expand_dims(image, axis=0),
tf.expand_dims(bbox, axis=0),
box_ind=[0],
crop_size=[self._image_height, self._image_width]),
axis=[0])
return image, label
def inputs(self, input_type, batch_size, augmentation_fn=None):
"""Construct input for PASCALVOC2012Classification evaluation using the Reader ops.
Args:
input_type: InputType enum
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, self._image_height, self._image_width, self._image_depth] size.
labels: tensor with batch_size labels
"""
InputType.check(input_type)
if input_type == InputType.train:
filenames = [os.path.join(self._data_dir, 'train.csv')]
num_examples_per_epoch = self._num_examples_per_epoch_for_train
else:
filenames = [os.path.join(self._data_dir, 'val.csv')]
num_examples_per_epoch = self._num_examples_per_epoch_for_eval
for name in filenames:
if not tf.gfile.Exists(name):
raise ValueError('Failed to find file: ' + name)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(
num_examples_per_epoch * min_fraction_of_examples_in_queue)
with tf.variable_scope("{}_input".format(input_type)):
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
image, label = self._read(filename_queue)
if augmentation_fn:
image = augmentation_fn(image)
return build_batch(
image,
label if not self._add_input_to_label else [label, image],
min_queue_examples,
batch_size,
shuffle=input_type == InputType.train)
def _maybe_download_and_extract(self):
"""Download and extract the tarball"""
dest_directory = self._data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = self._data_url.split('/')[-1]
archivepath = os.path.join(dest_directory, filename)
if not os.path.exists(archivepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
archivepath, _ = urllib.request.urlretrieve(self._data_url,
archivepath, _progress)
print()
statinfo = os.stat(archivepath)
print('Successfully downloaded', filename, statinfo.st_size,
'bytes.')
tarfile.open(archivepath, 'r').extractall(dest_directory)
print('Sucessfully extracted.')
# Now self._data dir contains VOCDevkit folder
# Build train.csv and val.csv file in self._data_dir
csv_header = ["filename", "y_min", "x_min", "y_max", "x_max", "label"]
if os.path.exists(os.path.join(
self._data_dir, 'train.csv')) and os.path.exists(
os.path.join(self._data_dir, 'val.csv')):
return
base_dir = os.path.join(
self._data_dir,
'VOCdevkit',
'VOC2012',
)
for current_set in ['train', 'val']:
csv_path = os.path.join(self._data_dir,
'{}.csv'.format(current_set))
with open(csv_path, mode='w') as csv_file:
# header
writer = csv.DictWriter(csv_file, csv_header)
writer.writeheader()
for current_class in self.CLASSES:
lines = open(
os.path.join(
base_dir, 'ImageSets', 'Main', '{}_{}.txt'.format(
current_class,
current_set))).read().strip().split("\n")
for line in lines:
splitted = line.split()
if len(splitted) < 1:
print(splitted, line, current_class)
if splitted[1] == "-1":
continue
image_xml = os.path.join(base_dir, 'Annotations',
'{}.xml'.format(splitted[0]))
image_filename = splitted[0]
# parse XML
tree = etree.parse(image_xml)
root = tree.getroot()
size = root.find('size')
width = float(size.find('width').text)
height = float(size.find('height').text)
for obj in root.iter('object'):
# skip difficult & object.name not in current class
label = obj.find('name').text
if label != current_class:
continue
difficult = obj.find('difficult').text
if int(difficult) == 1:
continue
bndbox = obj.find('bndbox')
normalized_bbox = [
# y_min
float(bndbox.find('ymin').text) / height,
# x_min
float(bndbox.find('xmin').text) / width,
# y_max
float(bndbox.find('ymax').text) / height,
# x_max
float(bndbox.find('xmax').text) / width
]
label_id = self.CLASSES.index(current_class)
writer.writerow({
"filename": image_filename,
"y_min": normalized_bbox[0],
"x_min": normalized_bbox[1],
"y_max": normalized_bbox[2],
"x_max": normalized_bbox[3],
"label": label_id
})
print('{}.csv created'.format(current_set))
``` |
[
{
"content": "```python\nimport typing as T\n\nimport pytest\nfrom docstring_parser.numpydoc import parse\n\n\n@pytest.mark.parametrize(\n \"source, expected\",\n [\n (\"\", None),\n (\"\\n\", None),\n (\"Short description\", \"Short description\"),\n (\"\\nShort description\\n... | [
{
"content": "<|memory_start|>```python\nimport typing as T\n\nimport pytest\nfrom docstring_parser.numpydoc import parse\n\n\n@pytest.mark.parametrize(\n \"source, expected\",\n [\n (\"\", None),\n (\"\\n\", None),\n (\"Short description\", \"Short description\"),\n (\"\\nShor... | ```python
import typing as T
import pytest
from docstring_parser.numpydoc import parse
@pytest.mark.parametrize(
"source, expected",
[
("", None),
("\n", None),
("Short description", "Short description"),
("\nShort description\n", "Short description"),
("\n Short description\n", "Short description"),
],
)
def test_short_description(source: str, expected: str) -> None:
docstring = parse(source)
assert docstring.short_description == expected
assert docstring.long_description is None
assert docstring.meta == []
@pytest.mark.parametrize(
"source, expected_short_desc, expected_long_desc, expected_blank",
[
(
"Short description\n\nLong description",
"Short description",
"Long description",
True,
),
(
"""
Short description
Long description
""",
"Short description",
"Long description",
True,
),
(
"""
Short description
Long description
Second line
""",
"Short description",
"Long description\nSecond line",
True,
),
(
"Short description\nLong description",
"Short description",
"Long description",
False,
),
(
"""
Short description
Long description
""",
"Short description",
"Long description",
False,
),
(
"\nShort description\nLong description\n",
"Short description",
"Long description",
False,
),
(
"""
Short description
Long description
Second line
""",
"Short description",
"Long description\nSecond line",
False,
),
],
)
def test_long_description(
source: str,
expected_short_desc: str,
expected_long_desc: str,
expected_blank: bool,
) -> None:
docstring = parse(source)
assert docstring.short_description == expected_short_desc
assert docstring.long_description == expected_long_desc
assert docstring.blank_after_short_description == expected_blank
assert docstring.meta == []
@pytest.mark.parametrize(
"source, expected_short_desc, expected_long_desc, "
"expected_blank_short_desc, expected_blank_long_desc",
[
(
"""
Short description
Parameters
----------
asd
""",
"Short description",
None,
False,
False,
),
(
"""
Short description
Long description
Parameters
----------
asd
""",
"Short description",
"Long description",
False,
False,
),
(
"""
Short description
First line
Second line
Parameters
----------
asd
""",
"Short description",
"First line\n Second line",
False,
False,
),
(
"""
Short description
First line
Second line
Parameters
----------
asd
""",
"Short description",
"First line\n Second line",
True,
False,
),
(
"""
Short description
First line
Second line
Parameters
----------
asd
""",
"Short description",
"First line\n Second line",
True,
True,
),
(
"""
Parameters
----------
asd
""",
None,
None,
False,
False,
),
],
)
def test_meta_newlines(
source: str,
expected_short_desc: T.Optional[str],
expected_long_desc: T.Optional[str],
expected_blank_short_desc: bool,
expected_blank_long_desc: bool,
) -> None:
docstring = parse(source)
assert docstring.short_description == expected_short_desc
assert docstring.long_description == expected_long_desc
assert docstring.blank_after_short_description == expected_blank_short_desc
assert docstring.blank_after_long_description == expected_blank_long_desc
assert len(docstring.meta) == 1
def test_meta_with_multiline_description() -> None:
docstring = parse(
"""
Short description
Parameters
----------
spam
asd
1
2
3
"""
)
assert docstring.short_description == "Short description"
assert len(docstring.meta) == 1
assert docstring.meta[0].args == ["param", "spam"]
assert docstring.meta[0].arg_name == "spam"
assert docstring.meta[0].description == "asd\n1\n 2\n3"
def test_default_args():
docstring = parse(
"""
A sample function
A function the demonstrates docstrings
Parameters
----------
arg1 : int
The firsty arg
arg2 : str
The second arg
arg3 : float, optional
The third arg. Default is 1.0.
arg4 : Optional[Dict[str, Any]], optional
The fourth arg. Defaults to None
arg5 : str, optional
The fifth arg. Default: DEFAULT_ARGS
Returns
-------
Mapping[str, Any]
The args packed in a mapping
"""
)
assert docstring is not None
assert len(docstring.params) == 5
arg4 = docstring.params[3]
assert arg4.arg_name == "arg4"
assert arg4.is_optional
assert arg4.type_name == "Optional[Dict[str, Any]]"
assert arg4.default == "None"
assert arg4.description == "The fourth arg. Defaults to None"
def test_multiple_meta() -> None:
docstring = parse(
"""
Short description
Parameters
----------
spam
asd
1
2
3
Raises
------
bla
herp
yay
derp
"""
)
assert docstring.short_description == "Short description"
assert len(docstring.meta) == 3
assert docstring.meta[0].args == ["param", "spam"]
assert docstring.meta[0].arg_name == "spam"
assert docstring.meta[0].description == "asd\n1\n 2\n3"
assert docstring.meta[1].args == ["raises", "bla"]
assert docstring.meta[1].type_name == "bla"
assert docstring.meta[1].description == "herp"
assert docstring.meta[2].args == ["raises", "yay"]
assert docstring.meta[2].type_name == "yay"
assert docstring.meta[2].description == "derp"
def test_params() -> None:
docstring = parse("Short description")
assert len(docstring.params) == 0
docstring = parse(
"""
Short description
Parameters
----------
name
description 1
priority : int
description 2
sender : str, optional
description 3
ratio : Optional[float], optional
description 4
"""
)
assert len(docstring.params) == 4
assert docstring.params[0].arg_name == "name"
assert docstring.params[0].type_name is None
assert docstring.params[0].description == "description 1"
assert not docstring.params[0].is_optional
assert docstring.params[1].arg_name == "priority"
assert docstring.params[1].type_name == "int"
assert docstring.params[1].description == "description 2"
assert not docstring.params[1].is_optional
assert docstring.params[2].arg_name == "sender"
assert docstring.params[2].type_name == "str"
assert docstring.params[2].description == "description 3"
assert docstring.params[2].is_optional
assert docstring.params[3].arg_name == "ratio"
assert docstring.params[3].type_name == "Optional[float]"
assert docstring.params[3].description == "description 4"
assert docstring.params[3].is_optional
docstring = parse(
"""
Short description
Parameters
----------
name
description 1
with multi-line text
priority : int
description 2
"""
)
assert len(docstring.params) == 2
assert docstring.params[0].arg_name == "name"
assert docstring.params[0].type_name is None
assert docstring.params[0].description == (
"description 1\n" "with multi-line text"
)
assert docstring.params[1].arg_name == "priority"
assert docstring.params[1].type_name == "int"
assert docstring.params[1].description == "description 2"
def test_attributes() -> None:
docstring = parse("Short description")
assert len(docstring.params) == 0
docstring = parse(
"""
Short description
Attributes
----------
name
description 1
priority : int
description 2
sender : str, optional
description 3
ratio : Optional[float], optional
description 4
"""
)
assert len(docstring.params) == 4
assert docstring.params[0].arg_name == "name"
assert docstring.params[0].type_name is None
assert docstring.params[0].description == "description 1"
assert not docstring.params[0].is_optional
assert docstring.params[1].arg_name == "priority"
assert docstring.params[1].type_name == "int"
assert docstring.params[1].description == "description 2"
assert not docstring.params[1].is_optional
assert docstring.params[2].arg_name == "sender"
assert docstring.params[2].type_name == "str"
assert docstring.params[2].description == "description 3"
assert docstring.params[2].is_optional
assert docstring.params[3].arg_name == "ratio"
assert docstring.params[3].type_name == "Optional[float]"
assert docstring.params[3].description == "description 4"
assert docstring.params[3].is_optional
docstring = parse(
"""
Short description
Attributes
----------
name
description 1
with multi-line text
priority : int
description 2
"""
)
assert len(docstring.params) == 2
assert docstring.params[0].arg_name == "name"
assert docstring.params[0].type_name is None
assert docstring.params[0].description == (
"description 1\n" "with multi-line text"
)
assert docstring.params[1].arg_name == "priority"
assert docstring.params[1].type_name == "int"
assert docstring.params[1].description == "description 2"
def test_other_params() -> None:
docstring = parse(
"""
Short description
Other Parameters
----------------
only_seldom_used_keywords : type, optional
Explanation
common_parameters_listed_above : type, optional
Explanation
"""
)
assert len(docstring.meta) == 2
assert docstring.meta[0].args == [
"other_param",
"only_seldom_used_keywords",
]
assert docstring.meta[0].arg_name == "only_seldom_used_keywords"
assert docstring.meta[0].type_name == "type"
assert docstring.meta[0].is_optional
assert docstring.meta[0].description == "Explanation"
assert docstring.meta[1].args == [
"other_param",
"common_parameters_listed_above",
]
def test_yields() -> None:
docstring = parse(
"""
Short description
Yields
------
int
description
"""
)
assert len(docstring.meta) == 1
assert docstring.meta[0].args == ["yields"]
assert docstring.meta[0].type_name == "int"
assert docstring.meta[0].description == "description"
assert docstring.meta[0].return_name is None
assert docstring.meta[0].is_generator
def test_returns() -> None:
docstring = parse(
"""
Short description
"""
)
assert docstring.returns is None
docstring = parse(
"""
Short description
Returns
-------
type
"""
)
assert docstring.returns is not None
assert docstring.returns.type_name == "type"
assert docstring.returns.description is None
docstring = parse(
"""
Short description
Returns
-------
int
description
"""
)
assert docstring.returns is not None
assert docstring.returns.type_name == "int"
assert docstring.returns.description == "description"
docstring = parse(
"""
Returns
-------
Optional[Mapping[str, List[int]]]
A description: with a colon
"""
)
assert docstring.returns is not None
assert docstring.returns.type_name == "Optional[Mapping[str, List[int]]]"
assert docstring.returns.description == "A description: with a colon"
docstring = parse(
"""
Short description
Returns
-------
int
description
with much text
even some spacing
"""
)
assert docstring.returns is not None
assert docstring.returns.type_name == "int"
assert docstring.returns.description == (
"description\n" "with much text\n\n" "even some spacing"
)
def test_raises() -> None:
docstring = parse(
"""
Short description
"""
)
assert len(docstring.raises) == 0
docstring = parse(
"""
Short description
Raises
------
ValueError
description
"""
)
assert len(docstring.raises) == 1
assert docstring.raises[0].type_name == "ValueError"
assert docstring.raises[0].description == "description"
def test_warns() -> None:
docstring = parse(
"""
Short description
Warns
-----
UserWarning
description
"""
)
assert len(docstring.meta) == 1
assert docstring.meta[0].type_name == "UserWarning"
assert docstring.meta[0].description == "description"
def test_simple_sections() -> None:
docstring = parse(
"""
Short description
See Also
--------
something : some thing you can also see
actually, anything can go in this section
Warnings
--------
Here be dragons
Notes
-----
None of this is real
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
"""
)
assert len(docstring.meta) == 4
assert docstring.meta[0].args == ["see_also"]
assert docstring.meta[0].description == (
"something : some thing you can also see\n"
"actually, anything can go in this section"
)
assert docstring.meta[1].args == ["warnings"]
assert docstring.meta[1].description == "Here be dragons"
assert docstring.meta[2].args == ["notes"]
assert docstring.meta[2].description == "None of this is real"
assert docstring.meta[3].args == ["references"]
def test_examples() -> None:
docstring = parse(
"""
Short description
Examples
--------
long example
more here
"""
)
assert len(docstring.meta) == 1
assert docstring.meta[0].description == "long example\n\nmore here"
@pytest.mark.parametrize(
"source, expected_depr_version, expected_depr_desc",
[
(
"Short description\n\n.. deprecated:: 1.6.0\n This is busted!",
"1.6.0",
"This is busted!",
),
(
(
"Short description\n\n"
".. deprecated:: 1.6.0\n"
" This description has\n"
" multiple lines!"
),
"1.6.0",
"This description has\nmultiple lines!",
),
("Short description\n\n.. deprecated:: 1.6.0", "1.6.0", None),
(
"Short description\n\n.. deprecated::\n No version!",
None,
"No version!",
),
],
)
def test_deprecation(
source: str,
expected_depr_version: T.Optional[str],
expected_depr_desc: T.Optional[str],
) -> None:
docstring = parse(source)
assert docstring.deprecation is not None
assert docstring.deprecation.version == expected_depr_version
assert docstring.deprecation.description == expected_depr_desc
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2021\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\n#\n# This program is free softw... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2021\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\n#\n# This progr... | ```python
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import os
from pathlib import Path
import pytest
from flaky import flaky
from telegram import PhotoSize, Animation, Voice, TelegramError, MessageEntity, Bot
from telegram.error import BadRequest
from telegram.utils.helpers import escape_markdown
from tests.conftest import check_shortcut_call, check_shortcut_signature, check_defaults_handling
@pytest.fixture(scope='function')
def animation_file():
f = open('tests/data/game.gif', 'rb')
yield f
f.close()
@pytest.fixture(scope='class')
def animation(bot, chat_id):
with open('tests/data/game.gif', 'rb') as f:
return bot.send_animation(
chat_id, animation=f, timeout=50, thumb=open('tests/data/thumb.jpg', 'rb')
).animation
class TestAnimation:
animation_file_id = 'CgADAQADngIAAuyVeEez0xRovKi9VAI'
animation_file_unique_id = 'adc3145fd2e84d95b64d68eaa22aa33e'
width = 320
height = 180
duration = 1
# animation_file_url = 'https://python-telegram-bot.org/static/testfiles/game.gif'
# Shortened link, the above one is cached with the wrong duration.
animation_file_url = 'http://bit.ly/2L18jua'
file_name = 'game.gif.mp4'
mime_type = 'video/mp4'
file_size = 4127
caption = "Test *animation*"
def test_slot_behaviour(self, animation, recwarn, mro_slots):
for attr in animation.__slots__:
assert getattr(animation, attr, 'err') != 'err', f"got extra slot '{attr}'"
assert not animation.__dict__, f"got missing slot(s): {animation.__dict__}"
assert len(mro_slots(animation)) == len(set(mro_slots(animation))), "duplicate slot"
animation.custom, animation.file_name = 'should give warning', self.file_name
assert len(recwarn) == 1 and 'custom' in str(recwarn[0].message), recwarn.list
def test_creation(self, animation):
assert isinstance(animation, Animation)
assert isinstance(animation.file_id, str)
assert isinstance(animation.file_unique_id, str)
assert animation.file_id != ''
assert animation.file_unique_id != ''
def test_expected_values(self, animation):
assert animation.file_size == self.file_size
assert animation.mime_type == self.mime_type
assert animation.file_name == self.file_name
assert isinstance(animation.thumb, PhotoSize)
@flaky(3, 1)
def test_send_all_args(self, bot, chat_id, animation_file, animation, thumb_file):
message = bot.send_animation(
chat_id,
animation_file,
duration=self.duration,
width=self.width,
height=self.height,
caption=self.caption,
parse_mode='Markdown',
disable_notification=False,
thumb=thumb_file,
)
assert isinstance(message.animation, Animation)
assert isinstance(message.animation.file_id, str)
assert isinstance(message.animation.file_unique_id, str)
assert message.animation.file_id != ''
assert message.animation.file_unique_id != ''
assert message.animation.file_name == animation.file_name
assert message.animation.mime_type == animation.mime_type
assert message.animation.file_size == animation.file_size
assert message.animation.thumb.width == self.width
assert message.animation.thumb.height == self.height
@flaky(3, 1)
def test_send_animation_custom_filename(self, bot, chat_id, animation_file, monkeypatch):
def make_assertion(url, data, **kwargs):
return data['animation'].filename == 'custom_filename'
monkeypatch.setattr(bot.request, 'post', make_assertion)
assert bot.send_animation(chat_id, animation_file, filename='custom_filename')
monkeypatch.delattr(bot.request, 'post')
@flaky(3, 1)
def test_get_and_download(self, bot, animation):
new_file = bot.get_file(animation.file_id)
assert new_file.file_size == self.file_size
assert new_file.file_id == animation.file_id
assert new_file.file_path.startswith('https://')
new_file.download('game.gif')
assert os.path.isfile('game.gif')
@flaky(3, 1)
def test_send_animation_url_file(self, bot, chat_id, animation):
message = bot.send_animation(
chat_id=chat_id, animation=self.animation_file_url, caption=self.caption
)
assert message.caption == self.caption
assert isinstance(message.animation, Animation)
assert isinstance(message.animation.file_id, str)
assert isinstance(message.animation.file_unique_id, str)
assert message.animation.file_id != ''
assert message.animation.file_unique_id != ''
assert message.animation.duration == animation.duration
assert message.animation.file_name == animation.file_name
assert message.animation.mime_type == animation.mime_type
assert message.animation.file_size == animation.file_size
@flaky(3, 1)
def test_send_animation_caption_entities(self, bot, chat_id, animation):
test_string = 'Italic Bold Code'
entities = [
MessageEntity(MessageEntity.ITALIC, 0, 6),
MessageEntity(MessageEntity.ITALIC, 7, 4),
MessageEntity(MessageEntity.ITALIC, 12, 4),
]
message = bot.send_animation(
chat_id, animation, caption=test_string, caption_entities=entities
)
assert message.caption == test_string
assert message.caption_entities == entities
@flaky(3, 1)
@pytest.mark.parametrize('default_bot', [{'parse_mode': 'Markdown'}], indirect=True)
def test_send_animation_default_parse_mode_1(self, default_bot, chat_id, animation_file):
test_string = 'Italic Bold Code'
test_markdown_string = '_Italic_ *Bold* `Code`'
message = default_bot.send_animation(chat_id, animation_file, caption=test_markdown_string)
assert message.caption_markdown == test_markdown_string
assert message.caption == test_string
@flaky(3, 1)
@pytest.mark.parametrize('default_bot', [{'parse_mode': 'Markdown'}], indirect=True)
def test_send_animation_default_parse_mode_2(self, default_bot, chat_id, animation_file):
test_markdown_string = '_Italic_ *Bold* `Code`'
message = default_bot.send_animation(
chat_id, animation_file, caption=test_markdown_string, parse_mode=None
)
assert message.caption == test_markdown_string
assert message.caption_markdown == escape_markdown(test_markdown_string)
@flaky(3, 1)
@pytest.mark.parametrize('default_bot', [{'parse_mode': 'Markdown'}], indirect=True)
def test_send_animation_default_parse_mode_3(self, default_bot, chat_id, animation_file):
test_markdown_string = '_Italic_ *Bold* `Code`'
message = default_bot.send_animation(
chat_id, animation_file, caption=test_markdown_string, parse_mode='HTML'
)
assert message.caption == test_markdown_string
assert message.caption_markdown == escape_markdown(test_markdown_string)
def test_send_animation_local_files(self, monkeypatch, bot, chat_id):
# For just test that the correct paths are passed as we have no local bot API set up
test_flag = False
expected = (Path.cwd() / 'tests/data/telegram.jpg/').as_uri()
file = 'tests/data/telegram.jpg'
def make_assertion(_, data, *args, **kwargs):
nonlocal test_flag
test_flag = data.get('animation') == expected and data.get('thumb') == expected
monkeypatch.setattr(bot, '_post', make_assertion)
bot.send_animation(chat_id, file, thumb=file)
assert test_flag
monkeypatch.delattr(bot, '_post')
@flaky(3, 1)
@pytest.mark.parametrize(
'default_bot,custom',
[
({'allow_sending_without_reply': True}, None),
({'allow_sending_without_reply': False}, None),
({'allow_sending_without_reply': False}, True),
],
indirect=['default_bot'],
)
def test_send_animation_default_allow_sending_without_reply(
self, default_bot, chat_id, animation, custom
):
reply_to_message = default_bot.send_message(chat_id, 'test')
reply_to_message.delete()
if custom is not None:
message = default_bot.send_animation(
chat_id,
animation,
allow_sending_without_reply=custom,
reply_to_message_id=reply_to_message.message_id,
)
assert message.reply_to_message is None
elif default_bot.defaults.allow_sending_without_reply:
message = default_bot.send_animation(
chat_id, animation, reply_to_message_id=reply_to_message.message_id
)
assert message.reply_to_message is None
else:
with pytest.raises(BadRequest, match='message not found'):
default_bot.send_animation(
chat_id, animation, reply_to_message_id=reply_to_message.message_id
)
@flaky(3, 1)
def test_resend(self, bot, chat_id, animation):
message = bot.send_animation(chat_id, animation.file_id)
assert message.animation == animation
def test_send_with_animation(self, monkeypatch, bot, chat_id, animation):
def test(url, data, **kwargs):
return data['animation'] == animation.file_id
monkeypatch.setattr(bot.request, 'post', test)
message = bot.send_animation(animation=animation, chat_id=chat_id)
assert message
def test_de_json(self, bot, animation):
json_dict = {
'file_id': self.animation_file_id,
'file_unique_id': self.animation_file_unique_id,
'width': self.width,
'height': self.height,
'duration': self.duration,
'thumb': animation.thumb.to_dict(),
'file_name': self.file_name,
'mime_type': self.mime_type,
'file_size': self.file_size,
}
animation = Animation.de_json(json_dict, bot)
assert animation.file_id == self.animation_file_id
assert animation.file_unique_id == self.animation_file_unique_id
assert animation.file_name == self.file_name
assert animation.mime_type == self.mime_type
assert animation.file_size == self.file_size
def test_to_dict(self, animation):
animation_dict = animation.to_dict()
assert isinstance(animation_dict, dict)
assert animation_dict['file_id'] == animation.file_id
assert animation_dict['file_unique_id'] == animation.file_unique_id
assert animation_dict['width'] == animation.width
assert animation_dict['height'] == animation.height
assert animation_dict['duration'] == animation.duration
assert animation_dict['thumb'] == animation.thumb.to_dict()
assert animation_dict['file_name'] == animation.file_name
assert animation_dict['mime_type'] == animation.mime_type
assert animation_dict['file_size'] == animation.file_size
@flaky(3, 1)
def test_error_send_empty_file(self, bot, chat_id):
animation_file = open(os.devnull, 'rb')
with pytest.raises(TelegramError):
bot.send_animation(chat_id=chat_id, animation=animation_file)
@flaky(3, 1)
def test_error_send_empty_file_id(self, bot, chat_id):
with pytest.raises(TelegramError):
bot.send_animation(chat_id=chat_id, animation='')
def test_error_send_without_required_args(self, bot, chat_id):
with pytest.raises(TypeError):
bot.send_animation(chat_id=chat_id)
def test_get_file_instance_method(self, monkeypatch, animation):
def make_assertion(*_, **kwargs):
return kwargs['file_id'] == animation.file_id
assert check_shortcut_signature(Animation.get_file, Bot.get_file, ['file_id'], [])
assert check_shortcut_call(animation.get_file, animation.bot, 'get_file')
assert check_defaults_handling(animation.get_file, animation.bot)
monkeypatch.setattr(animation.bot, 'get_file', make_assertion)
assert animation.get_file()
def test_equality(self):
a = Animation(
self.animation_file_id,
self.animation_file_unique_id,
self.height,
self.width,
self.duration,
)
b = Animation('', self.animation_file_unique_id, self.height, self.width, self.duration)
d = Animation('', '', 0, 0, 0)
e = Voice(self.animation_file_id, self.animation_file_unique_id, 0)
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n# -*- coding: utf-8 -*-\n#------------------------------------------------------------\n# pelisalacarta - XBMC Plugin\n# XBMC Tools\n# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\n#-----------------------------------... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#------------------------------------------------------------\n# pelisalacarta - XBMC Plugin\n# XBMC Tools\n# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\n#-------------------... | ```python
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# XBMC Tools
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urllib, urllib2
import xbmc
import xbmcgui
import xbmcplugin
import sys
import os
from servers import servertools
from core import config
from core import logger
from core.item import Item
from core import suscription
# Allows running in emulated mode: outside XBMC there is no plugin handle
# in sys.argv, so fall back to an empty string.
try:
    pluginhandle = int( sys.argv[ 1 ] )
except:
    pluginhandle = ""
# Master switch for the verbose per-call logging in the functions below.
DEBUG = False
def add_new_folder( item , totalItems=0 ):
    """Add a navigable folder entry for *item* to the XBMC directory listing.

    Builds the plugin callback URL from the item's fields, attaches
    context-menu commands for program favorites/subscriptions/downloads when
    item.context is "program", and registers the entry via
    xbmcplugin.addDirectoryItem.  Returns that call's boolean result
    (False if the entry was never added).
    """
    if item.fulltitle=="":
        item.fulltitle=item.title
    contextCommands = []
    ok = False
    try:
        item.context = urllib.unquote_plus(item.context)
    except:
        item.context=""
    if "|" in item.context:
        item.context = item.context.split("|")
    if DEBUG:
        try:
            # BUG FIX: this log line referenced an undefined name ``url``,
            # so it always raised NameError and fell through to the except.
            logger.info('[xbmctools.py] addnewfolder( "'+item.channel+'" , "'+item.action+'" , "'+item.category+'" , "'+item.date+'", "'+item.title+'" , "' + item.url + '" , "'+item.thumbnail+'" , "'+item.plot+'")" , "'+item.show+'")"')
        except:
            logger.info('[xbmctools.py] addnewfolder(<unicode>)')
    listitem = xbmcgui.ListItem( item.title, iconImage="DefaultFolder.png", thumbnailImage=item.thumbnail )
    listitem.setInfo( "video", { "Title" : item.title, "Plot" : item.plot, "Studio" : item.channel, "Genre" : item.category, "Year" : item.date[0:4] } )
    if item.fanart!="":
        listitem.setProperty('fanart_image',item.fanart)
        xbmcplugin.setPluginFanart(pluginhandle, item.fanart)
    # Simple re-encode to avoid problems with unicode titles
    # title = title.replace("&","%26").replace("+","%2B").replace("%","%25")
    try:
        item.title = item.title.encode ("utf-8") #This only aplies to unicode strings. The rest stay as they are.
    except:
        pass
    itemurl = '%s?channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&show=%s' % ( sys.argv[ 0 ] , item.channel , item.action , urllib.quote_plus( item.category ) , urllib.quote_plus(item.title) , urllib.quote_plus(item.fulltitle) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( item.plot ) , urllib.quote_plus( item.extra ) , urllib.quote_plus( item.show ))
    logger.info("itemurl="+itemurl)
    if item.context=="program":
        # Context-menu entries for the program listing (labels are
        # user-facing Spanish strings and must stay as-is).
        if item.is_favorite=="true":
            query = '%s?channel=api_programas&action=remove_from_favorites&url=%s' % ( sys.argv[ 0 ] , item.id)
            command = "XBMC.RunPlugin("+query+")"
            contextCommands.append( ("Quitar programa de favoritos",command) )
        else:
            query = '%s?channel=api_programas&action=add_to_favorites&url=%s' % ( sys.argv[ 0 ] , item.id)
            command = "XBMC.RunPlugin("+query+")"
            contextCommands.append( ("Añadir programa a favoritos",command) )
        #query = '%s?channel=api_programas&action=add_to_hidden&url=%s' % ( sys.argv[ 0 ] , item.id)
        #command = "XBMC.RunPlugin("+query+")"
        #contextCommands.append( ("Ocultar este programa",command) )
        if not suscription.already_suscribed(item):
            query = '%s?channel=%s&action=subscribe_to_program&title=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&show=%s' % ( sys.argv[ 0 ] , item.channel , urllib.quote_plus(item.title) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( item.plot ) , urllib.quote_plus( item.extra ) , urllib.quote_plus( item.title ))
            command = "XBMC.RunPlugin("+query+")"
            contextCommands.append( ("Suscribirme a este programa" , command) )
        else:
            query = '%s?channel=%s&action=unsubscribe_to_program&title=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&show=%s' % ( sys.argv[ 0 ] , item.channel , urllib.quote_plus(item.title) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( item.plot ) , urllib.quote_plus( item.extra ) , urllib.quote_plus( item.title ))
            command = "XBMC.RunPlugin("+query+")"
            contextCommands.append( ("Quitar suscripción a este programa" , command) )
        query = '%s?channel=%s&action=download_all_videos&title=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&show=%s' % ( sys.argv[ 0 ] , item.channel , urllib.quote_plus(item.title) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( item.plot ) , urllib.quote_plus( item.extra ) , urllib.quote_plus( item.title ))
        command = "XBMC.RunPlugin("+query+")"
        contextCommands.append( ("Descargar todos los vídeos" , command) )
    #elif item.context=="hidden_program":
    #    query = '%s?channel=api_programas&action=remove_from_hidden&url=%s' % ( sys.argv[ 0 ] , item.id)
    #    command = "XBMC.RunPlugin("+query+")"
    #    contextCommands.append( ("No ocultar este programa",command) )
    if config.get_platform()=="boxee":
        # Boxee does not support custom context menus.
        ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
    else:
        if len(contextCommands) > 0:
            listitem.addContextMenuItems ( contextCommands, replaceItems=True)
        if totalItems == 0:
            ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
        else:
            ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True, totalItems=totalItems)
    return ok
def addnewvideo( canal , accion , category , server , title , url , thumbnail, plot ,Serie="",duration="",fanart="",IsPlayable='false',context = "", subtitle="", viewmode="", totalItems = 0, show="", password="", extra="",fulltitle="", size=""):
    """Add a playable video entry to the XBMC directory listing.

    All fields are URL-encoded into the plugin callback URL.  *context* is a
    string (or "|"-separated list) of flags selecting which context-menu
    commands get attached:
      "1" save channel to favorites, "2" delete/rename saved channel,
      "6" watch live on justin.tv, "7" list justin.tv archives,
      "8" add to justin.tv favorites, "9" remove from justin.tv favorites.
    Returns the boolean result of xbmcplugin.addDirectoryItem.
    """
    contextCommands = []
    ok = False
    try:
        context = urllib.unquote_plus(context)
    except:
        context=""
    # ``context`` may stay a plain string or become a list; the ``in``
    # membership tests below work for both.
    if "|" in context:
        context = context.split("|")
    if DEBUG:
        try:
            logger.info('[xbmctools.py] addnewvideo( "'+canal+'" , "'+accion+'" , "'+category+'" , "'+server+'" , "'+title+'" , "' + url + '" , "'+thumbnail+'" , "'+plot+'")" , "'+Serie+'")"')
        except:
            logger.info('[xbmctools.py] addnewvideo(<unicode>)')
    listitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail )
    listitem.setInfo( "video", { "Title" : title, "Plot" : plot, "Duration" : duration, "Studio" : canal, "Genre" : category , "Size": size} )
    if fanart!="":
        logger.info("fanart :%s" %fanart)
        listitem.setProperty('fanart_image',fanart)
        xbmcplugin.setPluginFanart(pluginhandle, fanart)
    if IsPlayable == 'true': # Needed so xbmcplugin.setResolvedUrl() can be used
        listitem.setProperty('IsPlayable', 'true')
        #listitem.setProperty('fanart_image',os.path.join(IMAGES_PATH, "cinetube.png"))
    if "1" in context: # "1" adds a context-menu entry to save a channel to favorites (megalive)
        addItemCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&show=%s&password=%s&extradata=%s)" % ( sys.argv[ 0 ] , canal , "saveChannelFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie, urllib.quote_plus(show), urllib.quote_plus( password) , urllib.quote_plus(extra) )
        contextCommands.append((config.get_localized_string(30301),addItemCommand))
    if "2" in context: # "2" adds context-menu entries to delete and/or rename a favorite channel
        # BUG FIX: both format strings below were missing the fulltitle=%s
        # placeholder while still passing fulltitle as an argument, so the
        # %-formatting raised TypeError whenever this branch ran.
        addItemCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&show=%s&password=%s&extradata=%s)" % ( sys.argv[ 0 ] , canal , "deleteSavedChannel" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie, urllib.quote_plus( show), urllib.quote_plus( password) , urllib.quote_plus(extra) )
        contextCommands.append((config.get_localized_string(30302),addItemCommand))
        addItemCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&show=%s&password=%s&extradata=%s)" % ( sys.argv[ 0 ] , canal , "renameChannelTitle" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie, urllib.quote_plus( show),urllib.quote_plus( password) , urllib.quote_plus(extra) )
        contextCommands.append((config.get_localized_string(30303),addItemCommand))
    if "6" in context: # Watch channel live on justin.tv
        justinCommand = "XBMC.PlayMedia(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "playVideo" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) )
        contextCommands.append((config.get_localized_string(30410),justinCommand))
    if "7" in context: # List archived videos on justin.tv
        justinCommand = "XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "listarchives" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) )
        contextCommands.append((config.get_localized_string(30409),justinCommand))
    if "8" in context: # Add channel to justin.tv favorites
        justinCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "addToFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) )
        contextCommands.append((config.get_localized_string(30406),justinCommand))
    if "9" in context: # Remove channel from justin.tv favorites
        justinCommand = "XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "removeFromFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) )
        contextCommands.append((config.get_localized_string(30407),justinCommand))
    if len (contextCommands) > 0:
        listitem.addContextMenuItems ( contextCommands, replaceItems=False)
    try:
        title = title.encode ("utf-8") #This only aplies to unicode strings. The rest stay as they are.
        plot = plot.encode ("utf-8")
    except:
        pass
    itemurl = '%s?channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&subtitle=%s&show=%s&viewmode=%s&extradata=%s' % ( sys.argv[ 0 ] , canal , accion , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie , urllib.quote_plus(subtitle), urllib.quote_plus( show ) ,urllib.quote_plus(viewmode), urllib.quote_plus(extra) )
    #logger.info("[xbmctools.py] itemurl=%s" % itemurl)
    if totalItems == 0:
        ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url=itemurl, listitem=listitem, isFolder=False)
    else:
        ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url=itemurl, listitem=listitem, isFolder=False, totalItems=totalItems)
    return ok
def addthumbnailfolder( canal , scrapedtitle , scrapedurl , scrapedthumbnail , accion ):
    """Add a folder entry with a thumbnail to the XBMC directory listing."""
    logger.info('[xbmctools.py] addthumbnailfolder( "'+scrapedtitle+'" , "' + scrapedurl + '" , "'+scrapedthumbnail+'" , "'+accion+'")"')
    listitem = xbmcgui.ListItem( scrapedtitle, iconImage="DefaultFolder.png", thumbnailImage=scrapedthumbnail )
    # NOTE(review): ``category`` is filled with the title here — looks
    # intentional, but confirm against the callers.
    itemurl = '%s?channel=%s&action=%s&category=%s&url=%s&title=%s&thumbnail=%s' % (
        sys.argv[ 0 ],
        canal,
        accion,
        urllib.quote_plus( scrapedtitle ),
        urllib.quote_plus( scrapedurl ),
        urllib.quote_plus( scrapedtitle ),
        urllib.quote_plus( scrapedthumbnail ),
    )
    xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
def addfolder( canal , nombre , url , accion ):
    """Add a plain (no thumbnail) folder entry to the XBMC directory listing."""
    logger.info('[xbmctools.py] addfolder( "'+nombre+'" , "' + url + '" , "'+accion+'")"')
    listitem = xbmcgui.ListItem( nombre , iconImage="DefaultFolder.png")
    itemurl = '%s?channel=%s&action=%s&category=%s&url=%s' % (
        sys.argv[ 0 ],
        canal,
        accion,
        urllib.quote_plus(nombre),
        urllib.quote_plus(url),
    )
    xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
def addvideo( canal , nombre , url , category , server , Serie=""):
    """Add a non-folder (playable) entry that triggers the ``play`` action."""
    logger.info('[xbmctools.py] addvideo( "'+nombre+'" , "' + url + '" , "'+server+ '" , "'+Serie+'")"')
    listitem = xbmcgui.ListItem( nombre, iconImage="DefaultVideo.png" )
    listitem.setInfo( "video", { "Title" : nombre, "Plot" : nombre } )
    itemurl = '%s?channel=%s&action=play&category=%s&url=%s&server=%s&title=%s&Serie=%s' % (
        sys.argv[ 0 ],
        canal,
        category,
        urllib.quote_plus(url),
        server,
        urllib.quote_plus( nombre ),
        Serie,
    )
    xbmcplugin.addDirectoryItem( handle=pluginhandle, url=itemurl, listitem=listitem, isFolder=False)
# FIXME: ¿Por qué no pasar el item en lugar de todos los parámetros?
def play_video(channel="",server="",url="",category="",title="", thumbnail="",plot="",extra="",desdefavoritos=False,desdedescargados=False,desderrordescargas=False,strmfile=False,Serie="",subtitle="", video_password="",fulltitle=""):
from servers import servertools
import sys
import xbmcgui
if url.startswith("http://"):
url = url.replace(" ","%20")
try:
logger.info("[xbmctools.py] play_video(channel=%s, server=%s, url=%s, category=%s, title=%s, thumbnail=%s, plot=%s, desdefavoritos=%s, desdedescargados=%s, desderrordescargas=%s, strmfile=%s, Serie=%s, subtitle=%s" % (channel,server,url,category,title,thumbnail,plot,desdefavoritos,desdedescargados,desderrordescargas,strmfile,Serie,subtitle))
except:
pass
try:
server = server.lower()
except:
server = ""
if server=="":
server="directo"
try:
from core import descargas
download_enable=True
except:
download_enable=False
view = False
# Abre el diálogo de selección
opciones = []
default_action = config.get_setting("default_action")
logger.info("default_action="+default_action)
# Si no es el modo normal, no muestra el diálogo porque cuelga XBMC
muestra_dialogo = (config.get_setting("player_mode")=="0" and not strmfile)
# Extrae las URL de los vídeos, y si no puedes verlo te dice el motivo
video_urls,puedes,motivo = servertools.resolve_video_urls_for_playing(server,url,video_password,muestra_dialogo)
# Si puedes ver el vídeo, presenta las opciones
if puedes:
for video_url in video_urls:
opciones.append(config.get_localized_string(30151) + " " + video_url[0])
if server=="local":
opciones.append(config.get_localized_string(30164))
else:
if download_enable:
opcion = config.get_localized_string(30153)
opciones.append(opcion) # "Descargar"
if channel=="favoritos":
opciones.append(config.get_localized_string(30154)) # "Quitar de favoritos"
else:
opciones.append(config.get_localized_string(30155)) # "Añadir a favoritos"
#if not strmfile:
# opciones.append(config.get_localized_string(30161)) # "Añadir a Biblioteca"
if download_enable:
if channel!="descargas":
opciones.append(config.get_localized_string(30157)) # "Añadir a lista de descargas"
else:
if category=="errores":
opciones.append(config.get_localized_string(30159)) # "Borrar descarga definitivamente"
opciones.append(config.get_localized_string(30160)) # "Pasar de nuevo a lista de descargas"
else:
opciones.append(config.get_localized_string(30156)) # "Quitar de lista de descargas"
#opciones.append(config.get_localized_string(30158)) # "Enviar a JDownloader"
if default_action=="3":
seleccion = len(opciones)-1
# Busqueda de trailers en youtube
#if not channel in ["Trailer","ecarteleratrailers"]:
# opciones.append(config.get_localized_string(30162)) # "Buscar Trailer"
# Si no puedes ver el vídeo te informa
else:
import xbmcgui
if server!="":
advertencia = xbmcgui.Dialog()
if "<br/>" in motivo:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo.split("<br/>")[0],motivo.split("<br/>")[1],url)
else:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo,url)
else:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...","El servidor donde está alojado no está","soportado en pelisalacarta todavía",url)
if channel=="favoritos":
opciones.append(config.get_localized_string(30154)) # "Quitar de favoritos"
if channel=="descargas":
if category=="errores":
opciones.append(config.get_localized_string(30159)) # "Borrar descarga definitivamente"
else:
opciones.append(config.get_localized_string(30156)) # "Quitar de lista de descargas"
if len(opciones)==0:
return
# Si la accion por defecto es "Preguntar", pregunta
if default_action=="0":
import xbmcgui
dia = xbmcgui.Dialog()
seleccion = dia.select(config.get_localized_string(30163), opciones) # "Elige una opción"
#dia.close()
elif default_action=="1":
seleccion = 0
elif default_action=="2":
seleccion = len(video_urls)-1
elif default_action=="3":
seleccion = seleccion
else:
seleccion=0
logger.info("seleccion=%d" % seleccion)
logger.info("seleccion=%s" % opciones[seleccion])
# No ha elegido nada, lo más probable porque haya dado al ESC
if seleccion==-1:
#Para evitar el error "Uno o más elementos fallaron" al cancelar la selección desde fichero strm
listitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
import sys
xbmcplugin.setResolvedUrl(int(sys.argv[ 1 ]),False,listitem) # JUR Added
#if config.get_setting("subtitulo") == "true":
# config.set_setting("subtitulo", "false")
return
if opciones[seleccion]==config.get_localized_string(30158): # "Enviar a JDownloader"
#d = {"web": url}urllib.urlencode(d)
from core import scrapertools
data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+url+ " " +thumbnail)
return
elif opciones[seleccion]==config.get_localized_string(30164): # Borrar archivo en descargas
# En "extra" está el nombre del fichero en favoritos
import os
os.remove( url )
if os.path.exists(url[:-4]+".tbn"):
os.remove( url[:-4]+".tbn" )
if os.path.exists(url[:-4]+".nfo"):
os.remove( url[:-4]+".nfo" )
xbmc.executebuiltin( "Container.Refresh" )
return
# Ha elegido uno de los vídeos
elif seleccion < len(video_urls):
mediaurl = video_urls[seleccion][1]
if len(video_urls[seleccion])>2:
wait_time = video_urls[seleccion][2]
else:
wait_time = 0
if len(video_urls[seleccion])>3:
use_download_and_play = (video_urls[seleccion][3]=="download_and_play")
else:
use_download_and_play = False
view = True
# Descargar
elif opciones[seleccion]==config.get_localized_string(30153): # "Descargar"
# El vídeo de más calidad es el último
mediaurl = video_urls[len(video_urls)-1][1]
# Si no quiere usar filenium para descargas, coge el anterior
if config.get_setting("filenium_for_download")=="false" and video_urls[len(video_urls)-1][0]=="[filenium]":
mediaurl = video_urls[len(video_urls)-2][1]
from core import downloadtools
keyboard = xbmc.Keyboard(fulltitle)
keyboard.doModal()
if (keyboard.isConfirmed()):
title = keyboard.getText()
downloadtools.downloadtitle(mediaurl,title)
return
elif opciones[seleccion]==config.get_localized_string(30154): #"Quitar de favoritos"
from core import favoritos
# En "extra" está el nombre del fichero en favoritos
favoritos.deletebookmark(urllib.unquote_plus( extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30102) , title , config.get_localized_string(30105)) # 'Se ha quitado de favoritos'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30159): #"Borrar descarga definitivamente"
from core import descargas
descargas.delete_error_bookmark(urllib.unquote_plus( extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30106)) # 'Se ha quitado de la lista'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30160): #"Pasar de nuevo a lista de descargas":
from core import descargas
descargas.mover_descarga_error_a_pendiente(urllib.unquote_plus( extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30107)) # 'Ha pasado de nuevo a la lista de descargas'
return
elif opciones[seleccion]==config.get_localized_string(30155): #"Añadir a favoritos":
from core import favoritos
from core import downloadtools
keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(fulltitle)+" ["+channel+"]")
keyboard.doModal()
if keyboard.isConfirmed():
title = keyboard.getText()
favoritos.savebookmark(titulo=title,url=url,thumbnail=thumbnail,server=server,plot=plot,fulltitle=title)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30102) , title , config.get_localized_string(30108)) # 'se ha añadido a favoritos'
return
elif opciones[seleccion]==config.get_localized_string(30156): #"Quitar de lista de descargas":
from core import descargas
# La categoría es el nombre del fichero en la lista de descargas
descargas.deletebookmark((urllib.unquote_plus( extra )))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30106)) # 'Se ha quitado de lista de descargas'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30157): #"Añadir a lista de descargas":
from core import descargas
from core import downloadtools
keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(fulltitle))
keyboard.doModal()
if keyboard.isConfirmed():
title = keyboard.getText()
descargas.savebookmark(titulo=title,url=url,thumbnail=thumbnail,server=server,plot=plot,fulltitle=title)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30109)) # 'se ha añadido a la lista de descargas'
return
elif opciones[seleccion]==config.get_localized_string(30161): #"Añadir a Biblioteca": # Library
from platformcode.xbmc import library
titulo = fulltitle
if fulltitle=="":
titulo = title
library.savelibrary(titulo,url,thumbnail,server,plot,canal=channel,category=category,Serie=Serie)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , fulltitle , config.get_localized_string(30135)) # 'se ha añadido a la lista de descargas'
return
elif opciones[seleccion]==config.get_localized_string(30162): #"Buscar Trailer":
config.set_setting("subtitulo", "false")
import sys
xbmc.executebuiltin("Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s)" % ( sys.argv[ 0 ] , "trailertools" , "buscartrailer" , urllib.quote_plus( category ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) , server ))
return
# Si no hay mediaurl es porque el vídeo no está :)
logger.info("[xbmctools.py] mediaurl="+mediaurl)
if mediaurl=="":
logger.info("b1")
if server == "unknown":
alertUnsopportedServer()
else:
alertnodisponibleserver(server)
return
# Si hay un tiempo de espera (como en megaupload), lo impone ahora
if wait_time>0:
logger.info("b2")
continuar = handle_wait(wait_time,server,"Cargando vídeo...")
if not continuar:
return
# Obtención datos de la Biblioteca (solo strms que estén en la biblioteca)
import xbmcgui
if strmfile:
logger.info("b3")
xlistitem = getLibraryInfo(mediaurl)
else:
logger.info("b4")
try:
xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail, path=mediaurl)
except:
xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
xlistitem.setInfo( "video", { "Title": title, "Plot" : plot , "Studio" : channel , "Genre" : category } )
# Descarga el subtitulo
if channel=="cuevana" and subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")):
logger.info("b5")
try:
import os
ficherosubtitulo = os.path.join( config.get_data_path(), 'subtitulo.srt' )
if os.path.exists(ficherosubtitulo):
try:
os.remove(ficherosubtitulo)
except IOError:
logger.info("Error al eliminar el archivo subtitulo.srt "+ficherosubtitulo)
raise
from core import scrapertools
data = scrapertools.cache_page(subtitle)
#print data
fichero = open(ficherosubtitulo,"w")
fichero.write(data)
fichero.close()
#from core import downloadtools
#downloadtools.downloadfile(subtitle, ficherosubtitulo )
except:
logger.info("Error al descargar el subtítulo")
# Lanza el reproductor
if strmfile: #Si es un fichero strm no hace falta el play
logger.info("b6")
import sys
xbmcplugin.setResolvedUrl(int(sys.argv[ 1 ]),True,xlistitem)
#if subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")):
# logger.info("[xbmctools.py] Con subtitulos")
# setSubtitles()
else:
logger.info("b7")
if use_download_and_play or config.get_setting("player_mode")=="3":
logger.info("b11")
import download_and_play
# El canal exige usar download_and_play, pero el usuario no lo ha elegido -> le quitamos los diálogos
if use_download_and_play and config.get_setting("player_mode")!="3":
download_and_play.download_and_play( mediaurl , "download_and_play.tmp" , config.get_setting("downloadpath") , show_dialog=False )
else:
download_and_play.download_and_play( mediaurl , "download_and_play.tmp" , config.get_setting("downloadpath") )
return
elif config.get_setting("player_mode")=="0":
logger.info("b8")
# Añadimos el listitem a una lista de reproducción (playlist)
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
playlist.add( mediaurl, xlistitem )
# Reproduce
playersettings = config.get_setting('player_type')
logger.info("[xbmctools.py] playersettings="+playersettings)
player_type = xbmc.PLAYER_CORE_AUTO
if playersettings == "0":
player_type = xbmc.PLAYER_CORE_AUTO
logger.info("[xbmctools.py] PLAYER_CORE_AUTO")
elif playersettings == "1":
player_type = xbmc.PLAYER_CORE_MPLAYER
logger.info("[xbmctools.py] PLAYER_CORE_MPLAYER")
elif playersettings == "2":
player_type = xbmc.PLAYER_CORE_DVDPLAYER
logger.info("[xbmctools.py] PLAYER_CORE_DVDPLAYER")
xbmcPlayer = xbmc.Player( player_type )
xbmcPlayer.play(playlist)
if channel=="cuevana" and subtitle!="":
logger.info("subtitulo="+subtitle)
if subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")):
logger.info("[xbmctools.py] Con subtitulos")
setSubtitles()
elif config.get_setting("player_mode")=="1":
logger.info("b9")
#xlistitem.setProperty('IsPlayable', 'true')
#xlistitem.setProperty('path', mediaurl)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=mediaurl))
elif config.get_setting("player_mode")=="2":
logger.info("b10")
xbmc.executebuiltin( "PlayMedia("+mediaurl+")" )
if (config.get_setting("subtitulo") == "true") and view:
logger.info("b11")
from core import subtitletools
wait2second()
subtitletools.set_Subtitle()
if subtitle!="":
xbmc.Player().setSubtitles(subtitle)
def handle_wait(time_to_wait,title,text):
logger.info ("[xbmctools.py] handle_wait(time_to_wait=%d)" % time_to_wait)
import xbmc,xbmcgui
espera = xbmcgui.DialogProgress()
ret = espera.create(' '+title)
secs=0
percent=0
increment = int(100 / time_to_wait)
cancelled = False
while secs < time_to_wait:
secs = secs + 1
percent = increment*secs
secs_left = str((time_to_wait - secs))
remaining_display = ' Espera '+secs_left+' segundos para que comience el vídeo...'
espera.update(percent,' '+text,remaining_display)
xbmc.sleep(1000)
if (espera.iscanceled()):
cancelled = True
break
if cancelled == True:
logger.info ('Espera cancelada')
return False
else:
logger.info ('Espera finalizada')
return True
def getLibraryInfo (mediaurl):
'''Obtiene información de la Biblioteca si existe (ficheros strm) o de los parámetros
'''
if DEBUG:
logger.info('[xbmctools.py] playlist OBTENCIÓN DE DATOS DE BIBLIOTECA')
# Información básica
label = xbmc.getInfoLabel( 'listitem.label' )
label2 = xbmc.getInfoLabel( 'listitem.label2' )
iconImage = xbmc.getInfoImage( 'listitem.icon' )
thumbnailImage = xbmc.getInfoImage( 'listitem.Thumb' ) #xbmc.getInfoLabel( 'listitem.thumbnailImage' )
if DEBUG:
logger.info ("[xbmctools.py]getMediaInfo: label = " + label)
logger.info ("[xbmctools.py]getMediaInfo: label2 = " + label2)
logger.info ("[xbmctools.py]getMediaInfo: iconImage = " + iconImage)
logger.info ("[xbmctools.py]getMediaInfo: thumbnailImage = " + thumbnailImage)
# Creación de listitem
listitem = xbmcgui.ListItem(label, label2, iconImage, thumbnailImage, mediaurl)
# Información adicional
lista = [
('listitem.genre', 's'), #(Comedy)
('listitem.year', 'i'), #(2009)
('listitem.episode', 'i'), #(4)
('listitem.season', 'i'), #(1)
('listitem.top250', 'i'), #(192)
('listitem.tracknumber', 'i'), #(3)
('listitem.rating', 'f'), #(6.4) - range is 0..10
# ('listitem.watched', 'd'), # depreciated. use playcount instead
('listitem.playcount', 'i'), #(2) - number of times this item has been played
# ('listitem.overlay', 'i'), #(2) - range is 0..8. See GUIListItem.h for values
('listitem.overlay', 's'), #JUR - listitem devuelve un string, pero addinfo espera un int. Ver traducción más abajo
('listitem.cast', 's'), # (Michal C. Hall) - List concatenated into a string
('listitem.castandrole', 's'), #(Michael C. Hall|Dexter) - List concatenated into a string
('listitem.director', 's'), #(Dagur Kari)
('listitem.mpaa', 's'), #(PG-13)
('listitem.plot', 's'), #(Long Description)
('listitem.plotoutline', 's'), #(Short Description)
('listitem.title', 's'), #(Big Fan)
('listitem.duration', 's'), #(3)
('listitem.studio', 's'), #(Warner Bros.)
('listitem.tagline', 's'), #(An awesome movie) - short description of movie
('listitem.writer', 's'), #(Robert D. Siegel)
('listitem.tvshowtitle', 's'), #(Heroes)
('listitem.premiered', 's'), #(2005-03-04)
('listitem.status', 's'), #(Continuing) - status of a TVshow
('listitem.code', 's'), #(tt0110293) - IMDb code
('listitem.aired', 's'), #(2008-12-07)
('listitem.credits', 's'), #(Andy Kaufman) - writing credits
('listitem.lastplayed', 's'), #(%Y-%m-%d %h
('listitem.album', 's'), #(The Joshua Tree)
('listitem.votes', 's'), #(12345 votes)
('listitem.trailer', 's'), #(/home/user/trailer.avi)
]
# Obtenemos toda la info disponible y la metemos en un diccionario
# para la función setInfo.
infodict = dict()
for label,tipo in lista:
key = label.split('.',1)[1]
value = xbmc.getInfoLabel( label )
if value != "":
if DEBUG:
logger.info ("[xbmctools.py]getMediaInfo: "+key+" = " + value) #infoimage=infolabel
if tipo == 's':
infodict[key]=value
elif tipo == 'i':
infodict[key]=int(value)
elif tipo == 'f':
infodict[key]=float(value)
#Transforma el valor de overlay de string a int.
if infodict.has_key('overlay'):
value = infodict['overlay'].lower()
if value.find('rar') > -1:
infodict['overlay'] = 1
elif value.find('zip')> -1:
infodict['overlay'] = 2
elif value.find('trained')> -1:
infodict['overlay'] = 3
elif value.find('hastrainer')> -1:
infodict['overlay'] = 4
elif value.find('locked')> -1:
infodict['overlay'] = 5
elif value.find('unwatched')> -1:
infodict['overlay'] = 6
elif value.find('watched')> -1:
infodict['overlay'] = 7
elif value.find('hd')> -1:
infodict['overlay'] = 8
else:
infodict.pop('overlay')
if len (infodict) > 0:
listitem.setInfo( "video", infodict )
return listitem
def alertnodisponible():
advertencia = xbmcgui.Dialog()
#'Vídeo no disponible'
#'No se han podido localizar videos en la página del canal'
resultado = advertencia.ok(config.get_localized_string(30055) , config.get_localized_string(30056))
def alertnodisponibleserver(server):
advertencia = xbmcgui.Dialog()
# 'El vídeo ya no está en %s' , 'Prueba en otro servidor o en otro canal'
resultado = advertencia.ok( config.get_localized_string(30055),(config.get_localized_string(30057)%server),config.get_localized_string(30058))
def alertUnsopportedServer():
advertencia = xbmcgui.Dialog()
# 'Servidor no soportado o desconocido' , 'Prueba en otro servidor o en otro canal'
resultado = advertencia.ok( config.get_localized_string(30065),config.get_localized_string(30058))
def alerterrorpagina():
advertencia = xbmcgui.Dialog()
#'Error en el sitio web'
#'No se puede acceder por un error en el sitio web'
resultado = advertencia.ok(config.get_localized_string(30059) , config.get_localized_string(30060))
def alertanomegauploadlow(server):
advertencia = xbmcgui.Dialog()
#'La calidad elegida no esta disponible', 'o el video ha sido borrado',
#'Prueba a reproducir en otra calidad'
resultado = advertencia.ok( config.get_localized_string(30055) , config.get_localized_string(30061) , config.get_localized_string(30062))
# AÑADIDO POR JUR. SOPORTE DE FICHEROS STRM
def playstrm(params,url,category):
'''Play para videos en ficheros strm
'''
logger.info("[xbmctools.py] playstrm url="+url)
title = unicode( xbmc.getInfoLabel( "ListItem.Title" ), "utf-8" )
thumbnail = urllib.unquote_plus( params.get("thumbnail") )
plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )
server = params["server"]
if (params.has_key("Serie")):
serie = params.get("Serie")
else:
serie = ""
if (params.has_key("subtitle")):
subtitle = params.get("subtitle")
else:
subtitle = ""
from core.item import Item
from core.subtitletools import saveSubtitleName
item = Item(title=title,show=serie)
saveSubtitleName(item)
play_video("Biblioteca pelisalacarta",server,url,category,title,thumbnail,plot,strmfile=True,Serie=serie,subtitle=subtitle)
def renderItems(itemlist, params, url, category,isPlayable='false'):
viewmode = "list"
if itemlist <> None:
for item in itemlist:
if item.category == "":
item.category = category
if item.fulltitle=="":
item.fulltitle=item.title
if item.fanart=="":
channel_fanart = os.path.join( config.get_runtime_path(), 'resources', 'images', 'fanart', item.channel+'.jpg')
if os.path.exists(channel_fanart):
item.fanart = channel_fanart
else:
item.fanart = os.path.join(config.get_runtime_path(),"fanart.jpg")
if item.folder :
add_new_folder( item , totalItems=len(itemlist) )
else:
if config.get_setting("player_mode")=="1": # SetResolvedUrl debe ser siempre "isPlayable = true"
isPlayable = "true"
if item.duration:
addnewvideo( item.channel , item.action , item.category , item.server, item.title , item.url , item.thumbnail , item.plot , "" , duration = item.duration , fanart = item.fanart, IsPlayable=isPlayable,context = item.context , subtitle=item.subtitle, totalItems = len(itemlist), show=item.show, password = item.password, extra = item.extra, fulltitle=item.fulltitle, size=item.size )
else:
addnewvideo( item.channel , item.action , item.category , item.server, item.title , item.url , item.thumbnail , item.plot, fanart = item.fanart, IsPlayable=isPlayable , context = item.context , subtitle = item.subtitle , totalItems = len(itemlist), show=item.show , password = item.password , extra=item.extra, fulltitle=item.fulltitle, size=item.size )
if item.viewmode!="list":
viewmode = item.viewmode
# Cierra el directorio
xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
if config.get_setting("forceview")=="true":
if viewmode=="list":
xbmcplugin.setContent(pluginhandle,"Movies")
xbmc.executebuiltin("Container.SetViewMode(50)")
elif viewmode=="movie_with_plot":
xbmcplugin.setContent(pluginhandle,"Movies")
xbmc.executebuiltin("Container.SetViewMode(504)")
elif viewmode=="movie":
xbmcplugin.setContent(pluginhandle,"Movies")
xbmc.executebuiltin("Container.SetViewMode(500)")
elif viewmode=="series":
xbmcplugin.setContent(pluginhandle,"tvshows")
xbmc.executebuiltin("Container.SetViewMode(504)")
elif viewmode=="episodes":
xbmcplugin.setContent(pluginhandle,"episodes")
xbmc.executebuiltin("Container.SetViewMode(504)")
xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def wait2second():
logger.info("[xbmctools.py] wait2second")
import time
contador = 0
while xbmc.Player().isPlayingVideo()==False:
logger.info("[xbmctools.py] setSubtitles: Waiting 2 seconds for video to start before setting subtitles")
time.sleep(2)
contador = contador + 1
if contador>10:
break
def setSubtitles():
logger.info("[xbmctools.py] setSubtitles")
import time
contador = 0
while xbmc.Player().isPlayingVideo()==False:
logger.info("[xbmctools.py] setSubtitles: Waiting 2 seconds for video to start before setting subtitles")
time.sleep(2)
contador = contador + 1
if contador>10:
break
subtitlefile = os.path.join( config.get_data_path(), 'subtitulo.srt' )
logger.info("[xbmctools.py] setting subtitle file %s" % subtitlefile)
xbmc.Player().setSubtitles(subtitlefile)
def trailer(item):
logger.info("[xbmctools.py] trailer")
config.set_setting("subtitulo", "false")
import sys
xbmc.executebuiltin("XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s)" % ( sys.argv[ 0 ] , "trailertools" , "buscartrailer" , urllib.quote_plus( item.category ) , urllib.quote_plus( item.fulltitle ) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( "" ) ))
return
def alert_no_puedes_ver_video(server,url,motivo):
import xbmcgui
if server!="":
advertencia = xbmcgui.Dialog()
if "<br/>" in motivo:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo.split("<br/>")[0],motivo.split("<br/>")[1],url)
else:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo,url)
else:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...","El servidor donde está alojado no está","soportado en pelisalacarta todavía",url)
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\nimport os\n\nfrom json import encoder\nfrom json import dumps\n\nfrom sklearn_porter.estimator.classifier.Classifier import Classifier\n\n\nclass MLPClassifier(Classifier):\n \"\"\"\n See also\n --------\n sklearn.neural_network.MLPClassifier\n\n ... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nimport os\n\nfrom json import encoder\nfrom json import dumps\n\nfrom sklearn_porter.estimator.classifier.Classifier import Classifier\n\n\nclass MLPClassifier(Classifier):\n \"\"\"\n See also\n --------\n sklearn.neural_network.MLP... | ```python
# -*- coding: utf-8 -*-
import os
from json import encoder
from json import dumps
from sklearn_porter.estimator.classifier.Classifier import Classifier
class MLPClassifier(Classifier):
"""
See also
--------
sklearn.neural_network.MLPClassifier
http://scikit-learn.org/stable/modules/generated/
sklearn.neural_network.MLPClassifier.html
"""
SUPPORTED_METHODS = ['predict']
# @formatter:off
TEMPLATES = {
'java': {
'type': '{0}',
'arr': '{{{0}}}',
'new_arr': 'new {type}[{values}]',
'arr[]': '{type}[] {name} = {{{values}}};',
'arr[][]': '{type}[][] {name} = {{{values}}};',
'arr[][][]': '{type}[][][] {name} = {{{values}}};',
'indent': ' ',
},
'js': {
'type': '{0}',
'arr': '[{0}]',
'new_arr': 'new Array({values}).fill({fill_with})',
'arr[]': '{name} = [{values}];',
'arr[][]': '{name} = [{values}];',
'arr[][][]': '{name} = [{values}];',
'indent': ' ',
}
}
# @formatter:on
def __init__(self, estimator, target_language='java',
target_method='predict', **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming
language.
Parameters
----------
:param estimator : MLPClassifier
An instance of a trained MLPClassifier estimator.
:param target_language : string, default: 'java'
The target programming language.
:param target_method : string, default: 'predict'
The target method of the estimator.
"""
super(MLPClassifier, self).__init__(
estimator, target_language=target_language,
target_method=target_method, **kwargs)
# Activation function ('identity', 'logistic', 'tanh' or 'relu'):
hidden_activation = estimator.activation
if hidden_activation not in self.hidden_activation_functions:
raise ValueError(("The activation function '%s' of the estimator "
"is not supported.") % hidden_activation)
# Output activation function ('softmax' or 'logistic'):
output_activation = estimator.out_activation_
if output_activation not in self.output_activation_functions:
raise ValueError(("The activation function '%s' of the estimator "
"is not supported.") % output_activation)
self.estimator = estimator
@property
def hidden_activation_functions(self):
"""Get list of supported activation functions for the hidden layers."""
return ['relu', 'identity', 'tanh', 'logistic']
@property
def output_activation_functions(self):
"""Get list of supported activation functions for the output layer."""
return ['softmax', 'logistic']
def export(self, class_name, method_name, export_data=False,
export_dir='.', export_filename='data.json',
export_append_checksum=False, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
self.output_activation = est.out_activation_
self.hidden_activation = est.activation
self.n_layers = est.n_layers_
self.n_hidden_layers = est.n_layers_ - 2
self.n_inputs = len(est.coefs_[0])
self.n_outputs = est.n_outputs_
self.hidden_layer_sizes = est.hidden_layer_sizes
if isinstance(self.hidden_layer_sizes, int):
self.hidden_layer_sizes = [self.hidden_layer_sizes]
self.hidden_layer_sizes = list(self.hidden_layer_sizes)
self.layer_units = \
[self.n_inputs] + self.hidden_layer_sizes + [est.n_outputs_]
# Weights:
self.coefficients = est.coefs_
# Bias:
self.intercepts = est.intercepts_
# Binary or multiclass classifier?
self.is_binary = self.n_outputs == 1
self.prefix = 'binary' if self.is_binary else 'multi'
if self.target_method == 'predict':
# Exported:
if export_data and os.path.isdir(export_dir):
self.export_data(export_dir, export_filename,
export_append_checksum)
return self.predict('exported')
# Separated:
return self.predict('separated')
def predict(self, temp_type):
"""
Transpile the predict method.
Parameters
----------
:param temp_type : string
The kind of export type (embedded, separated, exported).
Returns
-------
:return : string
The transpiled predict method as string.
"""
# Exported:
if temp_type == 'exported':
temp = self.temp('exported.class')
return temp.format(class_name=self.class_name,
method_name=self.method_name)
# Separated:
temp_arr = self.temp('arr')
temp_arr_ = self.temp('arr[]')
temp_arr__ = self.temp('arr[][]')
temp_arr___ = self.temp('arr[][][]')
# Activations:
layers = list(self._get_activations())
layers = ', '.join(layers)
layers = temp_arr_.format(type='int', name='layers', values=layers)
# Coefficients (weights):
coefficients = []
for layer in self.coefficients:
layer_weights = []
for weights in layer:
weights = ', '.join([self.repr(w) for w in weights])
layer_weights.append(temp_arr.format(weights))
layer_weights = ', '.join(layer_weights)
coefficients.append(temp_arr.format(layer_weights))
coefficients = ', '.join(coefficients)
coefficients = temp_arr___.format(type='double',
name='weights',
values=coefficients)
# Intercepts (biases):
intercepts = list(self._get_intercepts())
intercepts = ', '.join(intercepts)
intercepts = temp_arr__.format(type='double',
name='bias',
values=intercepts)
temp_class = self.temp('separated.class')
file_name = '{}.js'.format(self.class_name.lower())
return temp_class.format(class_name=self.class_name,
method_name=self.method_name,
hidden_activation=self.hidden_activation,
output_activation=self.output_activation,
n_features=self.n_inputs,
weights=coefficients,
bias=intercepts,
layers=layers,
file_name=file_name)
def export_data(self, directory, filename, with_md5_hash=False):
"""
Save model data in a JSON file.
Parameters
----------
:param directory : string
The directory.
:param filename : string
The filename.
:param with_md5_hash : bool, default: False
Whether to append the checksum to the filename or not.
"""
model_data = {
'layers': [int(l) for l in list(self._get_activations())],
'weights': [c.tolist() for c in self.coefficients],
'bias': [i.tolist() for i in self.intercepts],
'hidden_activation': self.hidden_activation,
'output_activation': self.output_activation
}
encoder.FLOAT_REPR = lambda o: self.repr(o)
json_data = dumps(model_data, sort_keys=True)
if with_md5_hash:
import hashlib
json_hash = hashlib.md5(json_data).hexdigest()
filename = filename.split('.json')[0] + '_' + json_hash + '.json'
path = os.path.join(directory, filename)
with open(path, 'w') as fp:
fp.write(json_data)
def _get_intercepts(self):
"""
Concatenate all intercepts of the classifier.
"""
temp_arr = self.temp('arr')
for layer in self.intercepts:
inter = ', '.join([self.repr(b) for b in layer])
yield temp_arr.format(inter)
def _get_activations(self):
"""
Concatenate the layers sizes of the classifier except the input layer.
"""
return [str(x) for x in self.layer_units[1:]]
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n'''Driver for the TSL2561 digital luminosity (light) sensors.\n\nPick one up at http://www.adafruit.com/products/439\n\nAdafruit invests time and resources providing this open source code,\nplease support Adafrui... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n'''Driver for the TSL2561 digital luminosity (light) sensors.\n\nPick one up at http://www.adafruit.com/products/439\n\nAdafruit invests time and resources providing this open source code,\nplease... | ```python
'''Driver for the TSL2561 digital luminosity (light) sensors.
Pick one up at http://www.adafruit.com/products/439
Adafruit invests time and resources providing this open source code,
please support Adafruit and open-source hardware by purchasing
products from Adafruit!
Code ported from Adafruit Arduino library,
commit ced9f731da5095988cd66158562c2fde659e0510:
https://github.com/adafruit/Adafruit_TSL2561
'''
import time
from adafruit_i2c import Adafruit_I2C
from constants import *
import json
__author__ = 'Hugo SERRAT'
__credits__ = [
'K.Townsend (Adafruit Industries)',
'Georges Toth <georges@trypill.org>'
]
__license__ = 'BSD'
__version__ = 'v3.1'
'''HISTORY
v3.1 - Removed exception when sensor is saturated
v3.0 - Rewrote the i2c lib to make it work with python3
v2.0 - Rewrote driver for Adafruit_Sensor and Auto-Gain support, and
added lux clipping check (returns 0 lux on sensor saturation)
v1.0 - First release (previously TSL2561)
'''
class TSL2561(object):
'''Driver for the TSL2561 digital luminosity (light) sensors.'''
def __init__(self, address=None,
integration_time=TSL2561_DELAY_INTTIME_402MS,
gain=TSL2561_GAIN_1X, autogain=False, debug=False):
if address is not None:
self.address = address
else:
self.address = TSL2561_ADDR_FLOAT
self.i2c = Adafruit_I2C(self.address)
self.debug = debug
self.integration_time = integration_time
self.gain = gain
self.autogain = autogain
self._begin()
def _begin(self):
'''Initializes I2C and configures the sensor (call this function before
doing anything else)
'''
# Make sure we're actually connected
x = self.i2c.readU8(TSL2561_REGISTER_ID)
if not x & 0x0A:
raise Exception('TSL2561 not found!')
##########
# Set default integration time and gain
self.set_integration_time(self.integration_time)
self.set_gain(self.gain)
# Note: by default, the device is in power down mode on bootup
self.disable()
def enable(self):
'''Enable the device by setting the control bit to 0x03'''
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_CONTROL,
TSL2561_CONTROL_POWERON)
def disable(self):
'''Disables the device (putting it in lower power sleep mode)'''
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_CONTROL,
TSL2561_CONTROL_POWEROFF)
@staticmethod
def delay(value):
'''Delay times must be specified in milliseconds but as the python
sleep function only takes (float) seconds we need to convert the sleep
time first
'''
time.sleep(value / 1000.0)
def _get_data(self):
'''Private function to read luminosity on both channels'''
# Enable the device by setting the control bit to 0x03
self.enable()
# Wait x ms for ADC to complete
TSL2561.delay(self.integration_time)
# Reads a two byte value from channel 0 (visible + infrared)
broadband = self.i2c.readU16(TSL2561_COMMAND_BIT | TSL2561_WORD_BIT |
TSL2561_REGISTER_CHAN0_LOW)
# Reads a two byte value from channel 1 (infrared)
ir = self.i2c.readU16(TSL2561_COMMAND_BIT | TSL2561_WORD_BIT |
TSL2561_REGISTER_CHAN1_LOW)
# Turn the device off to save power
self.disable()
return (broadband, ir)
def set_integration_time(self, integration_time):
'''Sets the integration time for the TSL2561'''
# Enable the device by setting the control bit to 0x03
self.enable()
self.integration_time = integration_time
# Update the timing register
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_TIMING,
self.integration_time | self.gain)
# Turn the device off to save power
self.disable()
def set_gain(self, gain):
'''Adjusts the gain on the TSL2561 (adjusts the sensitivity to light)
'''
# Enable the device by setting the control bit to 0x03
self.enable()
self.gain = gain
# Update the timing register
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_TIMING,
self.integration_time | self.gain)
# Turn the device off to save power
self.disable()
def set_auto_range(self, value):
'''Enables or disables the auto-gain settings when reading
data from the sensor
'''
self.autogain = value
def _get_luminosity(self):
'''Gets the broadband (mixed lighting) and IR only values from
the TSL2561, adjusting gain if auto-gain is enabled
'''
valid = False
# If Auto gain disabled get a single reading and continue
if not self.autogain:
return self._get_data()
# Read data until we find a valid range
_agcCheck = False
broadband = 0
ir = 0
while not valid:
if self.integration_time == TSL2561_INTEGRATIONTIME_13MS:
_hi = TSL2561_AGC_THI_13MS
_lo = TSL2561_AGC_TLO_13MS
elif self.integration_time == TSL2561_INTEGRATIONTIME_101MS:
_hi = TSL2561_AGC_THI_101MS
_lo = TSL2561_AGC_TLO_101MS
else:
_hi = TSL2561_AGC_THI_402MS
_lo = TSL2561_AGC_TLO_402MS
_b, _ir = self._get_data()
# Run an auto-gain check if we haven't already done so ...
if not _agcCheck:
if _b < _lo and self.gain == TSL2561_GAIN_1X:
# Increase the gain and try again
self.set_gain(TSL2561_GAIN_16X)
# Drop the previous conversion results
_b, _ir = self._get_data()
# Set a flag to indicate we've adjusted the gain
_agcCheck = True
elif _b > _hi and self.gain == TSL2561_GAIN_16X:
# Drop gain to 1x and try again
self.set_gain(TSL2561_GAIN_1X)
# Drop the previous conversion results
_b, _ir = self._get_data()
# Set a flag to indicate we've adjusted the gain
_agcCheck = True
else:
# Nothing to look at here, keep moving ....
# Reading is either valid, or we're already at the chips
# limits
broadband = _b
ir = _ir
valid = True
else:
# If we've already adjusted the gain once, just return the new
# results.
# This avoids endless loops where a value is at one extreme
# pre-gain, and the the other extreme post-gain
broadband = _b
ir = _ir
valid = True
return (broadband, ir)
def _calculate_lux(self, broadband, ir):
'''Converts the raw sensor values to the standard SI lux equivalent.
Returns 0 if the sensor is saturated and the values are unreliable.
'''
# Make sure the sensor isn't saturated!
if self.integration_time == TSL2561_INTEGRATIONTIME_13MS:
clipThreshold = TSL2561_CLIPPING_13MS
elif self.integration_time == TSL2561_INTEGRATIONTIME_101MS:
clipThreshold = TSL2561_CLIPPING_101MS
else:
clipThreshold = TSL2561_CLIPPING_402MS
# Return max value 65535 lux if the sensor is saturated
if broadband > clipThreshold or ir > clipThreshold:
return 65535
# Get the correct scale depending on the integration time
if self.integration_time == TSL2561_INTEGRATIONTIME_13MS:
chScale = TSL2561_LUX_CHSCALE_TINT0
elif self.integration_time == TSL2561_INTEGRATIONTIME_101MS:
chScale = TSL2561_LUX_CHSCALE_TINT1
else:
chScale = 1 << TSL2561_LUX_CHSCALE
# Scale for gain (1x or 16x)
if not self.gain:
chScale = chScale << 4
# Scale the channel values
channel0 = (broadband * chScale) >> TSL2561_LUX_CHSCALE
channel1 = (ir * chScale) >> TSL2561_LUX_CHSCALE
# Find the ratio of the channel values (Channel1/Channel0)
ratio1 = 0
if channel0 != 0:
ratio1 = (channel1 << (TSL2561_LUX_RATIOSCALE + 1)) // channel0
# round the ratio value
ratio = (ratio1 + 1) >> 1
b = 0
m = 0
if ratio >= 0 and ratio <= TSL2561_LUX_K1T:
b = TSL2561_LUX_B1T
m = TSL2561_LUX_M1T
elif ratio <= TSL2561_LUX_K2T:
b = TSL2561_LUX_B2T
m = TSL2561_LUX_M2T
elif ratio <= TSL2561_LUX_K3T:
b = TSL2561_LUX_B3T
m = TSL2561_LUX_M3T
elif ratio <= TSL2561_LUX_K4T:
b = TSL2561_LUX_B4T
m = TSL2561_LUX_M4T
elif ratio <= TSL2561_LUX_K5T:
b = TSL2561_LUX_B5T
m = TSL2561_LUX_M5T
elif ratio <= TSL2561_LUX_K6T:
b = TSL2561_LUX_B6T
m = TSL2561_LUX_M6T
elif ratio <= TSL2561_LUX_K7T:
b = TSL2561_LUX_B7T
m = TSL2561_LUX_M7T
elif ratio > TSL2561_LUX_K8T:
b = TSL2561_LUX_B8T
m = TSL2561_LUX_M8T
temp = (channel0 * b) - (channel1 * m)
# Do not allow negative lux value
if temp < 0:
temp = 0
# Round lsb (2^(LUX_SCALE-1))
temp += 1 << (TSL2561_LUX_LUXSCALE - 1)
# Strip off fractional portion
lux = temp >> TSL2561_LUX_LUXSCALE
# Signal I2C had no errors
return lux
def lux(self):
'''Read sensor data, convert it to LUX and return it'''
broadband, ir = self._get_luminosity()
return self._calculate_lux(broadband, ir)
def getLuminosityDict(self):
data = {}
data['broadband'], data['ir'] = self._get_luminosity()
data['lux'] = self._calculate_lux(data['broadband'], data['ir'])
return data
def getLuminosityJson(self):
return json.dumps(self.getLuminosityDict())
``` |
[
{
"content": "```python\n#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file tha... | ```python
#!/usr/bin/env python3
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# requirements.txt Notes
# Create requirements.txt file command: pip freeze > requirements.txt
# Install requirements.txt file command: pip -r install requirements.txt
# Utility function to read REQUIREMENTS.txt inside a virtual env
# Parses requirements.txt into a list of requirements for the install_requires option.
def requires(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
content = f.readlines()
content = [x.strip() for x in content] #Takes /n off each line
return content
setup(
name='text-sentiment',
version='0.1.0',
packages=find_packages(exclude=['tests',]),
install_requires=requires('requirements.txt'), #All modules associated with package
license='Public Domain',
long_description=read('README'),
#url='https://example.com', #To github
#download_url='https://example.com', #Tarball download
author='Christopher Cintron',
author_email='chris.cintron502@gmail.com'
entry_points={
'console_scripts': ['text-sentiment=text_sentiment.app:main'],
},
)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n\"\"\"\n Copyright (c) 2018-2019 Alan Yorinks All rights reserved.\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n Version 3 as published by the F... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n\"\"\"\n Copyright (c) 2018-2019 Alan Yorinks All rights reserved.\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n Version 3 as pu... | ```python
"""
Copyright (c) 2018-2019 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
Version 3 as published by the Free Software Foundation; either
or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import argparse
import asyncio
import logging
import pathlib
import signal
import sys
from pymata_express.private_constants import PrivateConstants
from pymata_express.pymata_express import PymataExpress
from python_banyan.gateway_base_aio import GatewayBaseAIO
# noinspection PyAbstractClass,PyMethodMayBeStatic,PyRedundantParentheses,DuplicatedCode
class ArduinoGateway(GatewayBaseAIO):
# This class implements the GatewayBase interface adapted for asyncio.
# It supports Arduino boards, tested with Uno.
# NOTE: This class requires the use of Python 3.7 or above
# serial_port = None
def __init__(self, *subscriber_list, back_plane_ip_address=None,
subscriber_port='43125',
publisher_port='43124', process_name='ArduinoGateway',
event_loop=None, keep_alive=False, com_port=None,
arduino_instance_id=None, log=False):
"""
Set up the gateway for operation
:param subscriber_list: a tuple or list of subscription topics.
:param back_plane_ip_address: ip address of backplane or none if local
:param subscriber_port: backplane subscriber port
:param publisher_port: backplane publisher port
:param process_name: name to display on the console
:param event_loop: optional parameter to pass in an asyncio
event loop
:param keep_alive: if True, enable FirmataExpress keep-alives
:param com_port: force pymata-express to use this comport
:param arduino_instance: set an arduino instance id that must
be programmed into the FirmataExpress
sketch.
:param log: enable logging
"""
# set up logging if requested
self.log = log
if self.log:
fn = str(pathlib.Path.home()) + "/ardgw.log"
self.logger = logging.getLogger(__name__)
logging.basicConfig(filename=fn, filemode='w', level=logging.DEBUG)
sys.excepthook = self.my_handler
# set the event loop to be used. accept user's if provided
self.event_loop = event_loop
# instantiate pymata express to control the arduino
# if user want to pass in a com port, then pass it in
try:
if com_port:
self.arduino = PymataExpress(loop=self.event_loop,
com_port=com_port)
# if user wants to set an instance id, then pass it in
elif arduino_instance_id:
self.arduino = PymataExpress(loop=self.event_loop,
arduino_instance_id=arduino_instance_id)
# default settings
else:
self.arduino = PymataExpress(loop=self.event_loop)
except RuntimeError:
if self.log:
logging.exception("Exception occurred", exc_info=True)
raise
# extract pin info from self.arduino
self.number_of_digital_pins = len(self.arduino.digital_pins)
self.number_of_analog_pins = len(self.arduino.analog_pins)
self.first_analog_pin = self.arduino.first_analog_pin
# Initialize the parent
super(ArduinoGateway, self).__init__(subscriber_list=subscriber_list,
event_loop=self.event_loop,
back_plane_ip_address=back_plane_ip_address,
subscriber_port=subscriber_port,
publisher_port=publisher_port,
process_name=process_name,
)
self.first_analog_pin = self.arduino.first_analog_pin
self.keep_alive = keep_alive
def init_pins_dictionary(self):
"""
This method will initialize the pins dictionary contained
in gateway base parent class. This method is called by
the gateway base parent in its init method.
NOTE: that this a a non-asyncio method.
"""
report = self.event_loop.run_until_complete(self.arduino.get_capability_report())
x = 0
pin = 0
while x < len(report):
while report[x] != 127:
mode = report[x]
if mode == PrivateConstants.INPUT:
self.pins_dictionary[pin] = \
[GatewayBaseAIO.DIGITAL_INPUT_MODE, 0, False]
elif mode == PrivateConstants.ANALOG:
self.pins_dictionary[pin + self.first_analog_pin] = \
[GatewayBaseAIO.ANALOG_INPUT_MODE, 0, False]
x += 1
x += 1
pin += 1
# set up entry for i2c as pin 200 ( a pseudo pin number)
self.pins_dictionary[200] = GatewayBaseAIO.DIGITAL_INPUT_MODE
async def main(self):
# call the inherited begin method located in banyan_base_aio
await self.begin()
# start the keep alive on the Arduino if enabled
if self.keep_alive:
await self.arduino.keep_alive()
# sit in an endless loop to receive protocol messages
while True:
await self.receive_loop()
# The following methods and are called
# by the gateway base class in its incoming_message_processing
# method. They overwrite the default methods in the gateway_base.
async def digital_write(self, topic, payload):
"""
This method performs a digital write
:param topic: message topic
:param payload: {"command": "digital_write", "pin": “PIN”, "value": “VALUE”}
"""
await self.arduino.digital_write(payload["pin"], payload['value'])
async def disable_analog_reporting(self, topic, payload):
"""
This method disables analog input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "disable_analog_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.disable_analog_reporting(payload["pin"])
async def disable_digital_reporting(self, topic, payload):
"""
This method disables digital input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "disable_digital_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.disable_digital_reporting(payload["pin"])
async def enable_analog_reporting(self, topic, payload):
"""
This method enables analog input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "enable_analog_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.enable_analog_reporting(payload["pin"])
async def enable_digital_reporting(self, topic, payload):
"""
This method enables digital input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "enable_digital_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.enable_digital_reporting(payload["pin"])
async def i2c_read(self, topic, payload):
"""
This method will perform an i2c read by specifying the i2c
device address, i2c device register and the number of bytes
to read.
Call set_mode_i2c first to establish the pins for i2c operation.
:param topic: message topic
:param payload: {"command": "i2c_read", "pin": “PIN”, "tag": "TAG",
"addr": “I2C ADDRESS, "register": “I2C REGISTER”,
"number_of_bytes": “NUMBER OF BYTES”}
:return via the i2c_callback method
"""
await self.arduino.i2c_read(payload['addr'],
payload['register'],
payload['number_of_bytes'], callback=self.i2c_callback)
async def i2c_write(self, topic, payload):
"""
This method will perform an i2c write for the i2c device with
the specified i2c device address, i2c register and a list of byte
to write.
Call set_mode_i2c first to establish the pins for i2c operation.
:param topic: message topic
:param payload: {"command": "i2c_write", "pin": “PIN”, "tag": "TAG",
"addr": “I2C ADDRESS, "register": “I2C REGISTER”,
"data": [“DATA IN LIST FORM”]}
"""
await self.arduino.i2c_write(payload['addr'], payload['data'])
async def play_tone(self, topic, payload):
"""
This method plays a tone on a piezo device connected to the selected
pin at the frequency and duration requested.
Frequency is in hz and duration in milliseconds.
Call set_mode_tone before using this method.
:param topic: message topic
:param payload: {"command": "play_tone", "pin": “PIN”, "tag": "TAG",
“freq”: ”FREQUENCY”, duration: “DURATION”}
"""
await self.arduino.play_tone(payload['pin'],
payload['freq'],
payload['duration'])
async def pwm_write(self, topic, payload):
"""
This method sets the pwm value for the selected pin.
Call set_mode_pwm before calling this method.
:param topic: message topic
:param payload: {“command”: “pwm_write”, "pin": “PIN”,
"tag":”TAG”,
“value”: “VALUE”}
"""
await self.arduino.analog_write(payload["pin"], payload['value'])
async def servo_position(self, topic, payload):
"""
This method will set a servo's position in degrees.
Call set_mode_servo first to activate the pin for
servo operation.
:param topic: message topic
:param payload: {'command': 'servo_position',
"pin": “PIN”,'tag': 'servo',
“position”: “POSITION”}
"""
await self.arduino.servo_write(payload["pin"], payload["position"])
async def set_mode_analog_input(self, topic, payload):
"""
This method sets a GPIO pin as analog input.
:param topic: message topic
:param payload: {"command": "set_mode_analog_input", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin + self.first_analog_pin][GatewayBaseAIO.PIN_MODE] = \
GatewayBaseAIO.ANALOG_INPUT_MODE
await self.arduino.set_pin_mode_analog_input(pin, self.analog_input_callback)
async def set_mode_digital_input(self, topic, payload):
"""
This method sets a pin as digital input.
:param topic: message topic
:param payload: {"command": "set_mode_digital_input", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.DIGITAL_INPUT_MODE
await self.arduino.set_pin_mode_digital_input(pin, self.digital_input_callback)
async def set_mode_digital_input_pullup(self, topic, payload):
"""
This method sets a pin as digital input with pull up enabled.
:param topic: message topic
:param payload: message payload
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.DIGITAL_INPUT_PULLUP_MODE
await self.arduino.set_pin_mode_digital_input_pullup(pin, self.digital_input_callback)
async def set_mode_digital_output(self, topic, payload):
"""
This method sets a pin as a digital output pin.
:param topic: message topic
:param payload: {"command": "set_mode_digital_output", "pin": PIN, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.DIGITAL_OUTPUT_MODE
await self.arduino.set_pin_mode_digital_output(pin)
async def set_mode_i2c(self, topic, payload):
"""
This method sets up the i2c pins for i2c operations.
:param topic: message topic
:param payload: {"command": "set_mode_i2c"}
"""
self.pins_dictionary[200][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.I2C_MODE
await self.arduino.set_pin_mode_i2c()
async def set_mode_pwm(self, topic, payload):
"""
This method sets a GPIO pin capable of PWM for PWM operation.
:param topic: message topic
:param payload: {"command": "set_mode_pwm", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.PWM_OUTPUT_MODE
await self.arduino.set_pin_mode_pwm(pin)
async def set_mode_servo(self, topic, payload):
"""
This method establishes a GPIO pin for servo operation.
:param topic: message topic
:param payload: {"command": "set_mode_servo", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.SERVO_MODE
await self.arduino.set_pin_mode_servo(pin)
async def set_mode_sonar(self, topic, payload):
"""
This method sets the trigger and echo pins for sonar operation.
:param topic: message topic
:param payload: {"command": "set_mode_sonar", "trigger_pin": “PIN”, "tag":”TAG”
"echo_pin": “PIN”"tag":”TAG” }
"""
trigger = payload["trigger_pin"]
echo = payload["echo_pin"]
self.pins_dictionary[trigger][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.SONAR_MODE
self.pins_dictionary[echo][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.SONAR_MODE
await self.arduino.set_pin_mode_sonar(trigger, echo, callback=self.sonar_callback)
async def set_mode_stepper(self, topic, payload):
"""
This method establishes either 2 or 4 GPIO pins to be used in stepper
motor operation.
:param topic:
:param payload:{"command": "set_mode_stepper", "pins": [“PINS”],
"steps_per_revolution": “NUMBER OF STEPS”}
"""
for pin in payload['pins']:
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.STEPPER_MODE
await self.arduino.set_pin_mode_stepper(payload['steps_per_revolution'],
payload['pins'])
async def set_mode_tone(self, topic, payload):
"""
Establish a GPIO pin for tone operation.
:param topic:
:param payload:{"command": "set_mode_tone", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.TONE_MODE
await self.arduino.set_pin_mode_tone(pin)
async def stepper_write(self, topic, payload):
"""
Move a stepper motor for the specified number of steps.
:param topic:
:param payload: {"command": "stepper_write", "motor_speed": “SPEED”,
"number_of_steps":”NUMBER OF STEPS” }
"""
await self.arduino.stepper_write(payload['motor_speed'],
payload['number_of_steps'])
# Callbacks
async def digital_input_callback(self, data):
"""
Digital input data change reported by Arduino
:param data:
:return:
"""
# data = [pin mode, pin, current reported value, timestamp]
self.pins_dictionary[data[1]][GatewayBaseAIO.LAST_VALUE] = data[2]
payload = {'report': 'digital_input', 'pin': data[1],
'value': data[2], 'timestamp': data[3]}
await self.publish_payload(payload, 'from_arduino_gateway')
async def analog_input_callback(self, data):
# data = [pin mode, pin, current reported value, timestamp]
self.pins_dictionary[data[1] + self.arduino.first_analog_pin][GatewayBaseAIO.LAST_VALUE] = data[2]
payload = {'report': 'analog_input', 'pin': data[1],
'value': data[2], 'timestamp': data[3]}
await self.publish_payload(payload, 'from_arduino_gateway')
async def i2c_callback(self, data):
"""
Analog input data change reported by Arduino
:param data:
:return:
"""
# creat a string representation of the data returned
self.pins_dictionary[200] = data[1]
report = ', '.join([str(elem) for elem in data])
payload = {'report': 'i2c_data', 'value': report}
await self.publish_payload(payload, 'from_arduino_gateway')
async def sonar_callback(self, data):
"""
Sonar data change reported by Arduino
:param data:
:return:
"""
self.pins_dictionary[data[1]][GatewayBaseAIO.LAST_VALUE] = data[2]
payload = {'report': 'sonar_data', 'value': data[2]}
await self.publish_payload(payload, 'from_arduino_gateway')
def my_handler(self, tp, value, tb):
"""
for logging uncaught exceptions
:param tp:
:param value:
:param tb:
:return:
"""
self.logger.exception("Uncaught exception: {0}".format(str(value)))
# noinspection DuplicatedCode
def arduino_gateway():
# allow user to bypass the IP address auto-discovery. This is necessary if the component resides on a computer
# other than the computing running the backplane.
parser = argparse.ArgumentParser()
parser.add_argument("-b", dest="back_plane_ip_address", default="None",
help="None or IP address used by Back Plane")
parser.add_argument("-c", dest="com_port", default="None",
help="Use this COM port instead of auto discovery")
parser.add_argument("-k", dest="keep_alive", default="True",
help="Enable firmata-express keep-alive - set to True or False - default=False")
parser.add_argument("-i", dest="arduino_instance_id", default="None",
help="Set an Arduino Instance ID and match it in FirmataExpress")
parser.add_argument("-l", dest="log", default="False",
help="Set to True to turn logging on.")
parser.add_argument("-m", dest="subscriber_list",
default="to_arduino_gateway", nargs='+',
help="Banyan topics space delimited: topic1 topic2 topic3")
parser.add_argument("-n", dest="process_name",
default="ArduinoGateway", help="Set process name in "
"banner")
parser.add_argument("-p", dest="publisher_port", default='43124',
help="Publisher IP port")
parser.add_argument("-r", dest="publisher_topic",
default="from_rpi_gpio", help="Report topic")
parser.add_argument("-s", dest="subscriber_port", default='43125',
help="Subscriber IP port")
args = parser.parse_args()
subscriber_list = args.subscriber_list
kw_options = {
'publisher_port': args.publisher_port,
'subscriber_port': args.subscriber_port,
'process_name': args.process_name,
}
keep_alive = args.keep_alive.lower()
if keep_alive == 'false':
keep_alive = False
else:
keep_alive = True
kw_options['keep_alive'] = keep_alive
log = args.log.lower()
if log == 'false':
log = False
else:
log = True
kw_options['log'] = log
if args.back_plane_ip_address != 'None':
kw_options['back_plane_ip_address'] = args.back_plane_ip_address
if args.com_port != 'None':
kw_options['com_port'] = args.com_port
if args.arduino_instance_id != 'None':
kw_options['arduino_instance_id'] = int(args.arduino_instance_id)
# get the event loop
# this is for python 3.8
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
loop = asyncio.get_event_loop()
# replace with the name of your class
app = ArduinoGateway(subscriber_list, **kw_options, event_loop=loop)
try:
loop.run_until_complete(app.main())
except (KeyboardInterrupt, asyncio.CancelledError, RuntimeError):
if app.log:
logging.exception("Exception occurred", exc_info=True)
loop.stop()
loop.close()
sys.exit(0)
# signal handler function called when Control-C occurs
# noinspection PyShadowingNames,PyUnusedLocal
def signal_handler(sig, frame):
print('Exiting Through Signal Handler')
raise KeyboardInterrupt
# listen for SIGINT
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if __name__ == '__main__':
arduino_gateway()
``` |
[
{
"content": "Repeat the code precisely:\n```python\n# Copyright (c) 2015 Stephen Warren\n# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.\n#\n# SPDX-License-Identifier: GPL-2.0\n\n# Common logic to interact with U-Boot via the console. This class provides\n# the interface that tests use to e... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n# Copyright (c) 2015 Stephen Warren\n# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.\n#\n# SPDX-License-Identifier: GPL-2.0\n\n# Common logic to interact with U-Boot via the console. This class provides\n# the interface tha... | ```python
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0
# Common logic to interact with U-Boot via the console. This class provides
# the interface that tests use to execute U-Boot shell commands and wait for
# their results. Sub-classes exist to perform board-type-specific setup
# operations, such as spawning a sub-process for Sandbox, or attaching to the
# serial console of real hardware.
import multiplexed_log
import os
import pytest
import re
import sys
import u_boot_spawn
# Regexes for text we expect U-Boot to send to the console.
pattern_u_boot_spl_signon = re.compile('(U-Boot SPL \\d{4}\\.\\d{2}[^\r\n]*\\))')
pattern_u_boot_main_signon = re.compile('(U-Boot \\d{4}\\.\\d{2}[^\r\n]*\\))')
pattern_stop_autoboot_prompt = re.compile('Hit any key to stop autoboot: ')
pattern_unknown_command = re.compile('Unknown command \'.*\' - try \'help\'')
pattern_error_notification = re.compile('## Error: ')
pattern_error_please_reset = re.compile('### ERROR ### Please RESET the board ###')
PAT_ID = 0
PAT_RE = 1
bad_pattern_defs = (
('spl_signon', pattern_u_boot_spl_signon),
('main_signon', pattern_u_boot_main_signon),
('stop_autoboot_prompt', pattern_stop_autoboot_prompt),
('unknown_command', pattern_unknown_command),
('error_notification', pattern_error_notification),
('error_please_reset', pattern_error_please_reset),
)
class ConsoleDisableCheck(object):
"""Context manager (for Python's with statement) that temporarily disables
the specified console output error check. This is useful when deliberately
executing a command that is known to trigger one of the error checks, in
order to test that the error condition is actually raised. This class is
used internally by ConsoleBase::disable_check(); it is not intended for
direct usage."""
def __init__(self, console, check_type):
self.console = console
self.check_type = check_type
def __enter__(self):
self.console.disable_check_count[self.check_type] += 1
self.console.eval_bad_patterns()
def __exit__(self, extype, value, traceback):
self.console.disable_check_count[self.check_type] -= 1
self.console.eval_bad_patterns()
class ConsoleSetupTimeout(object):
"""Context manager (for Python's with statement) that temporarily sets up
timeout for specific command. This is useful when execution time is greater
then default 30s."""
def __init__(self, console, timeout):
self.p = console.p
self.orig_timeout = self.p.timeout
self.p.timeout = timeout
def __enter__(self):
return self
def __exit__(self, extype, value, traceback):
self.p.timeout = self.orig_timeout
class ConsoleBase(object):
"""The interface through which test functions interact with the U-Boot
console. This primarily involves executing shell commands, capturing their
results, and checking for common error conditions. Some common utilities
are also provided too."""
def __init__(self, log, config, max_fifo_fill):
"""Initialize a U-Boot console connection.
Can only usefully be called by sub-classes.
Args:
log: A mulptiplex_log.Logfile object, to which the U-Boot output
will be logged.
config: A configuration data structure, as built by conftest.py.
max_fifo_fill: The maximum number of characters to send to U-Boot
command-line before waiting for U-Boot to echo the characters
back. For UART-based HW without HW flow control, this value
should be set less than the UART RX FIFO size to avoid
overflow, assuming that U-Boot can't keep up with full-rate
traffic at the baud rate.
Returns:
Nothing.
"""
self.log = log
self.config = config
self.max_fifo_fill = max_fifo_fill
self.logstream = self.log.get_stream('console', sys.stdout)
# Array slice removes leading/trailing quotes
self.prompt = self.config.buildconfig['config_sys_prompt'][1:-1]
self.prompt_compiled = re.compile('^' + re.escape(self.prompt), re.MULTILINE)
self.p = None
self.disable_check_count = {pat[PAT_ID]: 0 for pat in bad_pattern_defs}
self.eval_bad_patterns()
self.at_prompt = False
self.at_prompt_logevt = None
def eval_bad_patterns(self):
self.bad_patterns = [pat[PAT_RE] for pat in bad_pattern_defs \
if self.disable_check_count[pat[PAT_ID]] == 0]
self.bad_pattern_ids = [pat[PAT_ID] for pat in bad_pattern_defs \
if self.disable_check_count[pat[PAT_ID]] == 0]
def close(self):
"""Terminate the connection to the U-Boot console.
This function is only useful once all interaction with U-Boot is
complete. Once this function is called, data cannot be sent to or
received from U-Boot.
Args:
None.
Returns:
Nothing.
"""
if self.p:
self.p.close()
self.logstream.close()
def run_command(self, cmd, wait_for_echo=True, send_nl=True,
wait_for_prompt=True):
"""Execute a command via the U-Boot console.
The command is always sent to U-Boot.
U-Boot echoes any command back to its output, and this function
typically waits for that to occur. The wait can be disabled by setting
wait_for_echo=False, which is useful e.g. when sending CTRL-C to
interrupt a long-running command such as "ums".
Command execution is typically triggered by sending a newline
character. This can be disabled by setting send_nl=False, which is
also useful when sending CTRL-C.
This function typically waits for the command to finish executing, and
returns the console output that it generated. This can be disabled by
setting wait_for_prompt=False, which is useful when invoking a long-
running command such as "ums".
Args:
cmd: The command to send.
wait_for_echo: Boolean indicating whether to wait for U-Boot to
echo the command text back to its output.
send_nl: Boolean indicating whether to send a newline character
after the command string.
wait_for_prompt: Boolean indicating whether to wait for the
command prompt to be sent by U-Boot. This typically occurs
immediately after the command has been executed.
Returns:
If wait_for_prompt == False:
Nothing.
Else:
The output from U-Boot during command execution. In other
words, the text U-Boot emitted between the point it echod the
command string and emitted the subsequent command prompts.
"""
if self.at_prompt and \
self.at_prompt_logevt != self.logstream.logfile.cur_evt:
self.logstream.write(self.prompt, implicit=True)
try:
self.at_prompt = False
if send_nl:
cmd += '\n'
while cmd:
# Limit max outstanding data, so UART FIFOs don't overflow
chunk = cmd[:self.max_fifo_fill]
cmd = cmd[self.max_fifo_fill:]
self.p.send(chunk)
if not wait_for_echo:
continue
chunk = re.escape(chunk)
chunk = chunk.replace('\\\n', '[\r\n]')
m = self.p.expect([chunk] + self.bad_patterns)
if m != 0:
self.at_prompt = False
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 1])
if not wait_for_prompt:
return
m = self.p.expect([self.prompt_compiled] + self.bad_patterns)
if m != 0:
self.at_prompt = False
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 1])
self.at_prompt = True
self.at_prompt_logevt = self.logstream.logfile.cur_evt
# Only strip \r\n; space/TAB might be significant if testing
# indentation.
return self.p.before.strip('\r\n')
except Exception as ex:
self.log.error(str(ex))
self.cleanup_spawn()
raise
def run_command_list(self, cmds):
"""Run a list of commands.
This is a helper function to call run_command() with default arguments
for each command in a list.
Args:
cmd: List of commands (each a string).
Returns:
A list of output strings from each command, one element for each
command.
"""
output = []
for cmd in cmds:
output.append(self.run_command(cmd))
return output
def ctrlc(self):
"""Send a CTRL-C character to U-Boot.
This is useful in order to stop execution of long-running synchronous
commands such as "ums".
Args:
None.
Returns:
Nothing.
"""
self.log.action('Sending Ctrl-C')
self.run_command(chr(3), wait_for_echo=False, send_nl=False)
def wait_for(self, text):
"""Wait for a pattern to be emitted by U-Boot.
This is useful when a long-running command such as "dfu" is executing,
and it periodically emits some text that should show up at a specific
location in the log file.
Args:
text: The text to wait for; either a string (containing raw text,
not a regular expression) or an re object.
Returns:
Nothing.
"""
if type(text) == type(''):
text = re.escape(text)
m = self.p.expect([text] + self.bad_patterns)
if m != 0:
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 1])
def drain_console(self):
"""Read from and log the U-Boot console for a short time.
U-Boot's console output is only logged when the test code actively
waits for U-Boot to emit specific data. There are cases where tests
can fail without doing this. For example, if a test asks U-Boot to
enable USB device mode, then polls until a host-side device node
exists. In such a case, it is useful to log U-Boot's console output
in case U-Boot printed clues as to why the host-side even did not
occur. This function will do that.
Args:
None.
Returns:
Nothing.
"""
# If we are already not connected to U-Boot, there's nothing to drain.
# This should only happen when a previous call to run_command() or
# wait_for() failed (and hence the output has already been logged), or
# the system is shutting down.
if not self.p:
return
orig_timeout = self.p.timeout
try:
# Drain the log for a relatively short time.
self.p.timeout = 1000
# Wait for something U-Boot will likely never send. This will
# cause the console output to be read and logged.
self.p.expect(['This should never match U-Boot output'])
except u_boot_spawn.Timeout:
pass
finally:
self.p.timeout = orig_timeout
def ensure_spawned(self):
"""Ensure a connection to a correctly running U-Boot instance.
This may require spawning a new Sandbox process or resetting target
hardware, as defined by the implementation sub-class.
This is an internal function and should not be called directly.
Args:
None.
Returns:
Nothing.
"""
if self.p:
return
try:
self.log.start_section('Starting U-Boot')
self.at_prompt = False
self.p = self.get_spawn()
# Real targets can take a long time to scroll large amounts of
# text if LCD is enabled. This value may need tweaking in the
# future, possibly per-test to be optimal. This works for 'help'
# on board 'seaboard'.
if not self.config.gdbserver:
self.p.timeout = 30000
self.p.logfile_read = self.logstream
bcfg = self.config.buildconfig
config_spl = bcfg.get('config_spl', 'n') == 'y'
config_spl_serial_support = bcfg.get('config_spl_serial_support',
'n') == 'y'
env_spl_skipped = self.config.env.get('env__spl_skipped',
False)
if config_spl and config_spl_serial_support and not env_spl_skipped:
m = self.p.expect([pattern_u_boot_spl_signon] +
self.bad_patterns)
if m != 0:
raise Exception('Bad pattern found on SPL console: ' +
self.bad_pattern_ids[m - 1])
m = self.p.expect([pattern_u_boot_main_signon] + self.bad_patterns)
if m != 0:
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 1])
self.u_boot_version_string = self.p.after
while True:
m = self.p.expect([self.prompt_compiled,
pattern_stop_autoboot_prompt] + self.bad_patterns)
if m == 0:
break
if m == 1:
self.p.send(' ')
continue
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 2])
self.at_prompt = True
self.at_prompt_logevt = self.logstream.logfile.cur_evt
except Exception as ex:
self.log.error(str(ex))
self.cleanup_spawn()
raise
finally:
self.log.end_section('Starting U-Boot')
def cleanup_spawn(self):
"""Shut down all interaction with the U-Boot instance.
This is used when an error is detected prior to re-establishing a
connection with a fresh U-Boot instance.
This is an internal function and should not be called directly.
Args:
None.
Returns:
Nothing.
"""
try:
if self.p:
self.p.close()
except:
pass
self.p = None
def restart_uboot(self):
"""Shut down and restart U-Boot."""
self.cleanup_spawn()
self.ensure_spawned()
def get_spawn_output(self):
"""Return the start-up output from U-Boot
Returns:
The output produced by ensure_spawed(), as a string.
"""
if self.p:
return self.p.get_expect_output()
return None
def validate_version_string_in_text(self, text):
"""Assert that a command's output includes the U-Boot signon message.
This is primarily useful for validating the "version" command without
duplicating the signon text regex in a test function.
Args:
text: The command output text to check.
Returns:
Nothing. An exception is raised if the validation fails.
"""
assert(self.u_boot_version_string in text)
def disable_check(self, check_type):
"""Temporarily disable an error check of U-Boot's output.
Create a new context manager (for use with the "with" statement) which
temporarily disables a particular console output error check.
Args:
check_type: The type of error-check to disable. Valid values may
be found in self.disable_check_count above.
Returns:
A context manager object.
"""
return ConsoleDisableCheck(self, check_type)
def temporary_timeout(self, timeout):
"""Temporarily set up different timeout for commands.
Create a new context manager (for use with the "with" statement) which
temporarily change timeout.
Args:
timeout: Time in milliseconds.
Returns:
A context manager object.
"""
return ConsoleSetupTimeout(self, timeout)
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Copyright (C) 2009-2016 Glencoe Software, Inc. All Rights Reserved.\n# Use is subject to license terms supplied in LICENSE.txt\n#\n# This program is free software; you... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Copyright (C) 2009-2016 Glencoe Software, Inc. All Rights Reserved.\n# Use is subject to license terms supplied in LICENSE.txt\n#\n# This program is fr... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2016 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Startup plugin for command-line importer.
"""
import os
import csv
import sys
import shlex
from omero.cli import BaseControl, CLI
import omero.java
from omero_ext.argparse import SUPPRESS
from path import path
START_CLASS = "ome.formats.importer.cli.CommandLineImporter"
TEST_CLASS = "ome.formats.test.util.TestEngine"
HELP = """Run the Java-based command-line importer
This is a Python wrapper around the Java importer. Login is handled by Python
OMERO.cli. To see more options, use "--javahelp".
Options marked with "**" are passed strictly to Java. If they interfere with
any of the Python arguments, you may need to end precede your arguments with a
"--".
Bulk imports:
Rather than passing one or more files to the import command, a single
dictionary-like file (e.g. yml or json) can be passed to the `--bulk`
argument. Most keys in the bulk file will be treated like additional
command-line arguments. Special keys include:
* columns A list of columns for parsing the value of path
* continue Like the "-c" changes error handling
* dry_run Prints out additional arguments rather than running them
* include Relative path (from the bulk file) of a parent bulk file
* path A file which will be parsed line by line based on its file
ending. Lines containing zero or more keys along with a
single file to be imported. Options for formats include:
- .tsv and .csv files will be parsed by the existing library
- other files will be parsed with shlex
- unless no columns are specified, in which case each line
is treated as a file
"""
EXAMPLES = """
Examples:
# Display help
$ bin/omero import -h
# Import foo.tiff using current login
$ bin/omero import ~/Data/my_file.dv
# Import foo.tiff using input credentials
$ bin/omero import -s localhost -u user -w password foo.tiff
# Set Java debugging level to ALL
$ bin/omero import foo.tiff -- --debug=ALL
# Display used files for importing foo.tiff
$ bin/omero import foo.tiff -f
# Limit debugging output
$ bin/omero import -- --debug=ERROR foo.tiff
For additional information, see:
https://docs.openmicroscopy.org/latest/omero/users/cli/import.html
Report bugs to <ome-users@lists.openmicroscopy.org.uk>
"""
TESTHELP = """Run the Importer TestEngine suite (devs-only)"""
DEBUG_CHOICES = ["ALL", "DEBUG", "ERROR", "FATAL", "INFO", "TRACE", "WARN"]
OUTPUT_CHOICES = ["ids", "legacy", "yaml"]
SKIP_CHOICES = ['all', 'checksum', 'minmax', 'thumbnails', 'upgrade']
NO_ARG = object()
class CommandArguments(object):
    """Sorts parsed CLI arguments into Python-side and Java-side argv lists.

    The importer is a Java program driven from this Python wrapper.  Some
    arguments are consumed here; the rest are forwarded verbatim to the
    Java command line.  Instances also mimic the original ``args``
    namespace for the Python-only keys so existing code can read them as
    attributes.
    """

    def __init__(self, ctx, args):
        self.__ctx = ctx
        self.__args = args
        self.__accepts = set()
        # Maps a key to its index in the "additional" list it lives in,
        # so re-adding a key replaces rather than duplicates (see add()).
        self.__added = dict()
        # "initial" lists are filled at construction; "additional" lists
        # collect arguments added later (e.g. from bulk files).
        self.__java_initial = list()
        self.__java_additional = list()
        self.__py_initial = list()
        self.__py_additional = list()
        # Python arguments
        self.__py_keys = (
            "javahelp", "skip", "file", "errs", "logback",
            "port", "password", "group", "create", "func",
            "bulk", "prog", "user", "key", "path", "logprefix",
            "JAVA_DEBUG", "quiet", "server", "depth", "clientdir",
            "sudo")
        self.set_login_arguments(ctx, args)
        self.set_skip_arguments(args)
        for key in vars(args):
            self.__accepts.add(key)
            val = getattr(args, key)
            if key in self.__py_keys:
                # Place the Python elements on the CommandArguments
                # instance so that it behaves like `args`
                setattr(self, key, val)
                self.append_arg(self.__py_initial, key, val)
            elif not val:
                # If there's no value, do nothing
                pass
            else:
                self.append_arg(self.__java_initial, key, val)

    def append_arg(self, cmd_list, key, val=NO_ARG):
        # Append the command-line form of (key, val) to cmd_list.
        arg_list = self.build_arg_list(key, val)
        cmd_list.extend(arg_list)

    def reset_arg(self, cmd_list, idx, key, val=NO_ARG):
        # Replace a previously appended argument in place at idx.
        arg_list = self.build_arg_list(key, val)
        cmd_list[idx:idx+len(arg_list)] = arg_list

    def build_arg_list(self, key, val=NO_ARG):
        """Return the argv fragment for one option.

        Single-letter keys become ``-k [val]``; longer keys become
        ``--key`` (no value), ``--key=val`` (string value) or bare
        ``--key`` for any other truthy value.  NO_ARG is the sentinel
        for "no value supplied".
        """
        arg_list = []
        if len(key) == 1:
            arg_list.append("-"+key)
            if val != NO_ARG:
                if isinstance(val, (str, unicode)):  # Python 2 codebase
                    arg_list.append(val)
        else:
            key = key.replace("_", "-")
            if val == NO_ARG:
                arg_list.append("--%s" % key)
            elif isinstance(val, (str, unicode)):
                arg_list.append(
                    "--%s=%s" % (key, val))
            else:
                arg_list.append("--%s" % key)
        return arg_list

    def set_path(self, path):
        # `path` must be the list of files to import; anything else is fatal.
        if not isinstance(path, list):
            self.__ctx.die(202, "Path is not a list")
        else:
            self.path = path

    def java_args(self):
        """Full argv tail forwarded to the Java importer."""
        rv = list()
        rv.extend(self.__java_initial)
        rv.extend(self.__java_additional)
        rv.extend(self.path)
        if self.JAVA_DEBUG:
            # Since "args.debug" is used by omero/cli.py itself,
            # uses of "--debug" *after* the `import` command are
            # handled by placing them in this special variable.
            rv.append("--debug=%s" % self.JAVA_DEBUG)
        return rv

    def initial_args(self):
        # Arguments known at construction time (Python then Java).
        rv = list()
        rv.extend(self.__py_initial)
        rv.extend(self.__java_initial)
        return rv

    def added_args(self):
        # Arguments added after construction plus the target paths.
        rv = list()
        rv.extend(self.__py_additional)
        rv.extend(self.__java_additional)
        rv.extend(self.path)
        return rv

    def accepts(self, key):
        # True if `key` was a recognized argparse destination.
        return key in self.__accepts

    def add(self, key, val=NO_ARG):
        """Add (or replace) an argument after construction.

        Used by bulk imports: re-adding a key overwrites its previous
        value in place rather than appending a duplicate.
        """
        idx = None
        if key in self.__added:
            idx = self.__added[key]
        if key in self.__py_keys:
            # First we check if this is a Python argument, in which
            # case it's set directly on the instance itself. This
            # may need to be later set elsewhere if multiple bulk
            # files are supported.
            setattr(self, key, val)
            where = self.__py_additional
        elif not self.accepts(key):
            self.__ctx.die(200, "Unknown argument: %s" % key)
        else:
            where = self.__java_additional
        if idx is None:
            idx = len(where)
            self.append_arg(where, key, val)
            self.__added[key] = idx
        else:
            self.reset_arg(where, idx, key, val)

    def set_login_arguments(self, ctx, args):
        """Set the connection arguments"""
        if args.javahelp:
            self.__java_initial.append("-h")
        # Connection is required unless help arguments or -f is passed
        connection_required = ("-h" not in self.__java_initial and
                               not args.f and
                               not args.advanced_help)
        if connection_required:
            client = ctx.conn(args)
            host = client.getProperty("omero.host")
            port = client.getProperty("omero.port")
            session = client.getSessionId()
            self.__java_initial.extend(["-s", host])
            self.__java_initial.extend(["-p", port])
            self.__java_initial.extend(["-k", session])

    def set_skip_arguments(self, args):
        """Set the arguments to skip steps during import"""
        if not args.skip:
            return
        if ('all' in args.skip or 'checksum' in args.skip):
            self.__java_initial.append("--checksum-algorithm=File-Size-64")
        if ('all' in args.skip or 'thumbnails' in args.skip):
            self.__java_initial.append("--no-thumbnails")
        if ('all' in args.skip or 'minmax' in args.skip):
            self.__java_initial.append("--no-stats-info")
        if ('all' in args.skip or 'upgrade' in args.skip):
            self.__java_initial.append("--no-upgrade-check")

    def open_files(self):
        # Open file handles for stdout/stderr if applicable
        out = self.open_log(self.__args.file, self.__args.logprefix)
        err = self.open_log(self.__args.errs, self.__args.logprefix)
        return out, err

    def open_log(self, file, prefix=None):
        # Returns None when no file was requested; creates parent
        # directories as needed before opening for write.
        if not file:
            return None
        if prefix:
            file = os.path.sep.join([prefix, file])
        dir = os.path.dirname(file)
        if not os.path.exists(dir):
            os.makedirs(dir)
        return open(file, "w")
class ImportControl(BaseControl):
    """OMERO CLI control wrapping the Java command-line importer."""

    # Java main class launched by do_import(); overridden by TestEngine.
    COMMAND = [START_CLASS]

    def _configure(self, parser):
        """Register all import arguments on `parser`.

        Arguments are grouped into those consumed by this Python wrapper
        and those forwarded to Java (marked "**" in the help text).
        """
        parser.add_login_arguments()
        parser.add_argument(
            "--javahelp", "--java-help",
            action="store_true", help="Show the Java help text")
        # The following arguments are strictly used by Python
        # The "---" form is kept for backwards compatibility.
        py_group = parser.add_argument_group(
            'Python arguments',
            'Optional arguments which are used to configure import.')

        def add_python_argument(*args, **kwargs):
            py_group.add_argument(*args, **kwargs)
        for name, help in (
                ("bulk", "Bulk YAML file for driving multiple imports"),
                ("logprefix", "Directory or file prefix for --file and --errs"),
                ("file", "File for storing the standard out of the Java process"),
                ("errs", "File for storing the standard err of the Java process")
                ):
            add_python_argument("--%s" % name, nargs="?", help=help)
            add_python_argument("---%s" % name, nargs="?", help=SUPPRESS)
        add_python_argument(
            "--clientdir", type=str,
            help="Path to the directory containing the client JARs. "
            " Default: lib/client")
        add_python_argument(
            "--logback", type=str,
            help="Path to a logback xml file. "
            " Default: etc/logback-cli.xml")
        # The following arguments are strictly passed to Java
        name_group = parser.add_argument_group(
            'Naming arguments', 'Optional arguments passed strictly to Java.')

        def add_java_name_argument(*args, **kwargs):
            name_group.add_argument(*args, **kwargs)
        add_java_name_argument(
            "-n", "--name",
            help="Image or plate name to use (**)",
            metavar="NAME")
        add_java_name_argument(
            "-x", "--description",
            help="Image or plate description to use (**)",
            metavar="DESCRIPTION")
        # Deprecated naming arguments
        add_java_name_argument(
            "--plate_name",
            help=SUPPRESS)
        add_java_name_argument(
            "--plate_description",
            help=SUPPRESS)
        # Feedback options
        feedback_group = parser.add_argument_group(
            'Feedback arguments',
            'Optional arguments passed strictly to Java allowing to report'
            ' errors to the OME team.')

        def add_feedback_argument(*args, **kwargs):
            feedback_group.add_argument(*args, **kwargs)
        add_feedback_argument(
            "--report", action="store_true",
            help="Report errors to the OME team (**)")
        add_feedback_argument(
            "--upload", action="store_true",
            help=("Upload broken files and log file (if any) with report."
                  " Required --report (**)"))
        add_feedback_argument(
            "--logs", action="store_true",
            help=("Upload log file (if any) with report."
                  " Required --report (**)"))
        add_feedback_argument(
            "--email",
            help="Email for reported errors. Required --report (**)",
            metavar="EMAIL")
        add_feedback_argument(
            "--qa-baseurl",
            help=SUPPRESS)
        # Annotation options
        annotation_group = parser.add_argument_group(
            'Annotation arguments',
            'Optional arguments passed strictly to Java allowing to annotate'
            ' imports.')

        def add_annotation_argument(*args, **kwargs):
            annotation_group.add_argument(*args, **kwargs)
        add_annotation_argument(
            "--annotation-ns", metavar="ANNOTATION_NS",
            help="Namespace to use for subsequent annotation (**)")
        add_annotation_argument(
            "--annotation-text", metavar="ANNOTATION_TEXT",
            help="Content for a text annotation (**)")
        add_annotation_argument(
            "--annotation-link",
            metavar="ANNOTATION_LINK",
            help="Comment annotation ID to link all images to (**)")
        # Underscore variants kept (hidden) for backwards compatibility.
        add_annotation_argument(
            "--annotation_ns", metavar="ANNOTATION_NS",
            help=SUPPRESS)
        add_annotation_argument(
            "--annotation_text", metavar="ANNOTATION_TEXT",
            help=SUPPRESS)
        add_annotation_argument(
            "--annotation_link", metavar="ANNOTATION_LINK",
            help=SUPPRESS)
        java_group = parser.add_argument_group(
            'Java arguments', 'Optional arguments passed strictly to Java')

        def add_java_argument(*args, **kwargs):
            java_group.add_argument(*args, **kwargs)
        add_java_argument(
            "-f", action="store_true",
            help="Display the used files and exit (**)")
        add_java_argument(
            "-c", action="store_true",
            help="Continue importing after errors (**)")
        add_java_argument(
            "-l",
            help="Use the list of readers rather than the default (**)",
            metavar="READER_FILE")
        add_java_argument(
            "-d",
            help="OMERO dataset ID to import image into (**)",
            metavar="DATASET_ID")
        add_java_argument(
            "-r",
            help="OMERO screen ID to import plate into (**)",
            metavar="SCREEN_ID")
        add_java_argument(
            "-T", "--target",
            help="OMERO target specification (**)",
            metavar="TARGET")
        add_java_argument(
            "--debug", choices=DEBUG_CHOICES,
            help="Turn debug logging on (**)",
            metavar="LEVEL", dest="JAVA_DEBUG")
        add_java_argument(
            "--output", choices=OUTPUT_CHOICES,
            help="Set an alternative output style",
            metavar="TYPE")
        # Arguments previously *following" `--`
        advjava_group = parser.add_argument_group(
            'Advanced Java arguments', (
                'Optional arguments passed strictly to Java.'
                'For more information, see --advanced-help'))

        def add_advjava_argument(*args, **kwargs):
            advjava_group.add_argument(*args, **kwargs)
        add_advjava_argument(
            "--advanced-help", action="store_true",
            help="Show the advanced help text")
        add_advjava_argument(
            "--transfer", nargs="?", metavar="TYPE",
            help="Transfer methods like in-place import")
        add_advjava_argument(
            "--exclude", nargs="?", metavar="TYPE",
            help="Exclusion filters for preventing re-import")
        add_advjava_argument(
            "--checksum-algorithm", nargs="?", metavar="TYPE",
            help="Alternative hashing mechanisms balancing speed & accuracy")
        # Unsure on these.
        add_python_argument(
            "--depth", default=4, type=int,
            help="Number of directories to scan down for files")
        add_python_argument(
            "--skip", type=str, choices=SKIP_CHOICES, action='append',
            help="Optional step to skip during import")
        add_python_argument(
            "path", nargs="*",
            help="Path to be passed to the Java process")
        parser.set_defaults(func=self.importer)

    def importer(self, args):
        """Entry point: assemble the Java classpath/JVM args and import."""
        if args.clientdir:
            client_dir = path(args.clientdir)
        else:
            client_dir = self.ctx.dir / "lib" / "client"
        etc_dir = self.ctx.dir / "etc"
        if args.logback:
            xml_file = path(args.logback)
        else:
            xml_file = etc_dir / "logback-cli.xml"
        logback = "-Dlogback.configurationFile=%s" % xml_file
        try:
            classpath = [file.abspath() for file in client_dir.files("*.jar")]
        except OSError as e:
            self.ctx.die(102, "Cannot get JAR files from '%s' (%s)"
                         % (client_dir, e.strerror))
        if not classpath:
            self.ctx.die(103, "No JAR files found under '%s'" % client_dir)
        command_args = CommandArguments(self.ctx, args)
        xargs = [logback, "-Xmx1024M", "-cp", os.pathsep.join(classpath)]
        xargs.append("-Domero.import.depth=%s" % args.depth)
        # --bulk and positional paths are mutually exclusive.
        if args.bulk and args.path:
            self.ctx.die(104, "When using bulk import, omit paths")
        elif args.bulk:
            self.bulk_import(command_args, xargs)
        else:
            self.do_import(command_args, xargs)

    def do_import(self, command_args, xargs):
        """Run one Java import; exit code is stored in self.ctx.rv."""
        out = err = None
        try:
            import_command = self.COMMAND + command_args.java_args()
            out, err = command_args.open_files()
            p = omero.java.popen(
                import_command, debug=False, xargs=xargs,
                stdout=out, stderr=err)
            self.ctx.rv = p.wait()
        finally:
            # Make sure file handles are closed
            if out:
                out.close()
            if err:
                err.close()

    def bulk_import(self, command_args, xargs):
        """Drive multiple imports from a YAML bulk file.

        Bulk files may `include` parent bulk files; parents are loaded
        first (via reversed()) so that child values override them.
        """
        try:
            from yaml import safe_load
        except ImportError:
            self.ctx.die(105, "yaml is unsupported")
        old_pwd = os.getcwd()
        try:
            # Walk the .yml graph looking for includes
            # and load them all so that the top parent
            # values can be overwritten.
            contents = list()
            bulkfile = command_args.bulk
            while bulkfile:
                bulkfile = os.path.abspath(bulkfile)
                parent = os.path.dirname(bulkfile)
                with open(bulkfile, "r") as f:
                    data = safe_load(f)
                contents.append((bulkfile, parent, data))
                bulkfile = data.get("include")
                os.chdir(parent)
                # TODO: included files are updated based on the including
                # file but other file paths aren't!
            bulk = dict()
            for bulkfile, parent, data in reversed(contents):
                bulk.update(data)
                os.chdir(parent)
            failed = 0
            total = 0
            for cont in self.parse_bulk(bulk, command_args):
                if command_args.dry_run:
                    # Dry run: print the would-be arguments, quoted.
                    rv = ['"%s"' % x for x in command_args.added_args()]
                    rv = " ".join(rv)
                    self.ctx.out(rv)
                else:
                    self.do_import(command_args, xargs)
                    if self.ctx.rv:
                        failed += 1
                        total += self.ctx.rv
                        if cont:
                            msg = "Import failed with error code: %s. Continuing"
                            self.ctx.err(msg % self.ctx.rv)
                        else:
                            msg = "Import failed. Use -c to continue after errors"
                            self.ctx.die(106, msg)
            # Fail if any import failed
            self.ctx.rv = total
            # NOTE(review): "%x" formats the count in hex — probably
            # intended to be "%s"; confirm before relying on the message.
            if failed:
                self.ctx.err("%x failed imports" % failed)
        finally:
            os.chdir(old_pwd)

    def parse_bulk(self, bulk, command_args):
        """Generator: apply bulk-file keys, then yield once per import.

        The yielded value is the "continue" flag so the caller knows
        whether a failure should be fatal.
        """
        # Known keys with special handling
        cont = False
        command_args.dry_run = False
        if "dry_run" in bulk:
            dry_run = bulk.pop("dry_run")
            command_args.dry_run = dry_run
        if "continue" in bulk:
            cont = True
            c = bulk.pop("continue")
            if bool(c):
                command_args.add("c")
        if "path" not in bulk:
            # Required until @file format is implemented
            self.ctx.die(107, "No path specified")
        path = bulk.pop("path")
        cols = None
        if "columns" in bulk:
            cols = bulk.pop("columns")
        if "include" in bulk:
            bulk.pop("include")
        # Now parse all other keys
        for key in bulk:
            command_args.add(key, bulk[key])
        # All properties are set, yield for each path
        # to be imported in turn. The value for `cont`
        # is yielded so that the caller knows whether
        # or not an error should be fatal.
        if not cols:
            # No parsing necessary
            function = self.parse_text
        else:
            function = self.parse_shlex
            if path.endswith(".tsv"):
                function = self.parse_tsv
            elif path.endswith(".csv"):
                function = self.parse_csv
        for parts in function(path):
            if not cols:
                command_args.set_path(parts)
            else:
                for idx, col in enumerate(cols):
                    if col == "path":
                        command_args.set_path([parts[idx]])
                    else:
                        command_args.add(col, parts[idx])
            yield cont

    def parse_text(self, path, parse=False):
        # Yield one single-element list per stripped line.
        # NOTE(review): with parse=True the shlex token list is wrapped
        # in another list ([tokens]) — verify column indexing against
        # this shape in parse_bulk.
        with open(path, "r") as o:
            for line in o:
                line = line.strip()
                if parse:
                    line = shlex.split(line)
                yield [line]

    def parse_shlex(self, path):
        # Shell-style (quoted) token splitting per line.
        for line in self.parse_text(path, parse=True):
            yield line

    def parse_tsv(self, path, delimiter="\t"):
        # Tab-separated values: delegate to the CSV parser.
        for line in self.parse_csv(path, delimiter):
            yield line

    def parse_csv(self, path, delimiter=","):
        # Yield each CSV row as a list of column values.
        with open(path, "r") as data:
            for line in csv.reader(data, delimiter=delimiter):
                yield line
class TestEngine(ImportControl):
    # Same wrapper as ImportControl, but launches the TestEngine suite
    # (devs-only) instead of the command-line importer.
    COMMAND = [TEST_CLASS]
# When loaded as an OMERO CLI plugin, `register` is injected into the
# module namespace by the loader; when run directly, fall back to
# constructing a local CLI and invoking it with the process arguments.
try:
    register("import", ImportControl, HELP, epilog=EXAMPLES)
    register("testengine", TestEngine, TESTHELP)
except NameError:
    if __name__ == "__main__":
        cli = CLI()
        cli.register("import", ImportControl, HELP, epilog=EXAMPLES)
        cli.register("testengine", TestEngine, TESTHELP)
        cli.invoke(sys.argv[1:])
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n# -*- coding: utf8 -*-\n# This file is part of PYBOSSA.\n#\n# Copyright (C) 2015 SF Isle of Man Limited\n#\n# PYBOSSA is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# -*- coding: utf8 -*-\n# This file is part of PYBOSSA.\n#\n# Copyright (C) 2015 SF Isle of Man Limited\n#\n# PYBOSSA is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affer... | ```python
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
class WebhookAuth(object):
    """Authorization rules for webhook resources.

    Webhooks are system-generated: no one may create, update or delete
    them through the API, and only admins or project owners may read.
    """

    _specific_actions = []

    def __init__(self, project_repo):
        self.project_repo = project_repo

    @property
    def specific_actions(self):
        return self._specific_actions

    def can(self, user, action, webhook=None, project_id=None):
        # Dispatch to the matching private handler (_create, _read, ...).
        handler = getattr(self, '_%s' % action)
        return handler(user, webhook, project_id)

    def _create(self, user, webhook, project_id=None):
        return False

    def _read(self, user, webhook=None, project_id=None):
        no_target = webhook is None and project_id is None
        if user.is_anonymous() or no_target:
            return False
        project = self._get_project(webhook, project_id)
        return user.admin or user.id in project.owners_ids

    def _update(self, user, webhook, project_id=None):
        return False

    def _delete(self, user, webhook, project_id=None):
        return False

    def _get_project(self, webhook, project_id):
        # Prefer the webhook's own project when one is supplied.
        if webhook is None:
            return self.project_repo.get(project_id)
        return self.project_repo.get(webhook.project_id)
``` |
[
{
"content": "Repeat the following code:\n```python\nfrom multiprocessing import Process\nfrom bs4 import BeautifulSoup\nimport time\n\nfrom google.google_gmail import get_gmail_lists, get_gmail_message_mime, delete_gmail_message\nfrom config.cfg import put_config_axiscare_url\nfrom log.log import log_general, ... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\nfrom multiprocessing import Process\nfrom bs4 import BeautifulSoup\nimport time\n\nfrom google.google_gmail import get_gmail_lists, get_gmail_message_mime, delete_gmail_message\nfrom config.cfg import put_config_axiscare_url\nfrom log.log impo... | ```python
from multiprocessing import Process
from bs4 import BeautifulSoup
import time
from google.google_gmail import get_gmail_lists, get_gmail_message_mime, delete_gmail_message
from config.cfg import put_config_axiscare_url
from log.log import log_general, log_error
def eml_list():
    # Thin wrapper: fetch the list of Gmail message descriptors.
    return get_gmail_lists()
def get_ids(id_list):
    """Return the Gmail message ids from a list of message descriptors.

    Args:
        id_list: iterable of dicts, each carrying an 'id' key.

    Returns:
        List of the 'id' values, in input order.
    """
    # Idiom: list comprehension instead of the manual append loop.
    return [entry['id'] for entry in id_list]
def get_emails(ids):
    """Fetch each message in MIME form, pairing each id with its email."""
    emls = []
    for id in ids:  # NOTE: `id` shadows the builtin
        e = get_gmail_message_mime(id)
        emls.append({'id': id, 'email': e})
    return emls
def extract_url(eml):
    """Scan a MIME message for the first AxisCare link in an HTML part.

    Walks multipart payloads looking for text/html bodies, parses them
    with BeautifulSoup, and returns the first anchor href that starts
    with https://1000.axiscare.com, or False if none is found.
    """
    for p in eml.get_payload():
        # Only descend into multipart containers (payload is a list,
        # not a decoded string).
        if not isinstance(p.get_payload(), str):
            for p2 in p.get_payload():
                for h in p2._headers:
                    if h[0]== 'Content-Type' and h[1].startswith('text/html'):
                        payload = p2.get_payload()
                        soup = BeautifulSoup(payload, "html.parser")
                        a_all = soup.findAll("a")
                        for a in a_all:
                            # NOTE(review): strips quoted-printable "=3D"
                            # artifacts, but removes EVERY occurrence of
                            # "3D" in the href — confirm that is intended.
                            href = a.attrs['href'].replace('3D', '').replace('\"', '')
                            if href.startswith('https://1000.axiscare.com'):
                                # Assumption that html version appears before pdf version
                                return href
    return False
def process_emls(emls):
    """Store the first AxisCare URL found among the fetched emails.

    Returns True once a URL has been extracted, persisted to config and
    its source email deleted; False when no email contained a URL.
    """
    for e in emls:
        url = extract_url(e['email'])
        if url:
            put_config_axiscare_url(url)
            # Delete email
            delete_gmail_message(e['id'])
            return True
    return False
def url_updater():
    """Poll Gmail every 5 minutes for AxisCare URL notification emails."""
    # NOTE(review): updatestatus persists across loop iterations — once
    # True, idle iterations keep logging "has been updated"; confirm
    # that is intended.
    updatestatus = False
    while True:
        try:
            eml_lists = eml_list()
            if len(eml_lists) > 0:
                eml_ids = get_ids(eml_lists)
                if len(eml_ids) > 0:
                    emls = get_emails(eml_ids)
                    updatestatus = process_emls(emls)
            if updatestatus:
                msg_success = 'the url stored in config.json has been updated'
            else:
                msg_success = 'no new urls received'
            log_general('Axiscare URL updater process completed - {msg_success}'.format(msg_success=msg_success))
        except Exception as e:
            # Keep the poller alive on any failure; just log and retry.
            log_error('Could not process emails to check for new URL notification - {error}'.format(error=e))
        time.sleep(300)  # 5mins
def start_url_updater():
    # Launch the polling loop in a separate (non-daemon) process.
    process_urlupdater = Process(target=url_updater)
    process_urlupdater.start()
    log_general('Axiscare URL updater process started')
``` |
[
{
"content": "```python\nimport pytest\nimport sys\nimport unittest\n\nfrom exam import Exam, fixture\nfrom lxml import etree, html\nfrom lxml.cssselect import CSSSelector\n\nfrom toronado import Rule, Properties, inline, from_string\n\ntry:\n from lxml.html import soupparser\nexcept ImportError:\n souppa... | [
{
"content": "<|memory_start|>```python\nimport pytest\nimport sys\nimport unittest\n\nfrom exam import Exam, fixture\nfrom lxml import etree, html\nfrom lxml.cssselect import CSSSelector\n\nfrom toronado import Rule, Properties, inline, from_string\n\ntry:\n from lxml.html import soupparser\nexcept ImportEr... | ```python
import pytest
import sys
import unittest
from exam import Exam, fixture
from lxml import etree, html
from lxml.cssselect import CSSSelector
from toronado import Rule, Properties, inline, from_string
try:
from lxml.html import soupparser
except ImportError:
soupparser = None
class TestCase(Exam, unittest.TestCase):
    # Base class combining exam's fixture support with unittest.TestCase.
    pass
class RuleTestCase(TestCase):
    """Tests for CSS Rule specificity ordering and combination."""

    def test_compares_by_specificity(self):
        # id selector > element; equal-specificity selectors compare equal.
        self.assertGreater(Rule('#main'), Rule('div'))
        self.assertEqual(Rule('div'), Rule('p'))
        self.assertLess(Rule('div'), Rule('div.container'))

    def test_combine_respects_specificity_rules(self):
        # The more specific h1#primary wins the conflicting `color`.
        properties = Rule.combine((
            Rule('h1', {
                'font-weight': 'bold',
                'color': 'blue',
            }),
            Rule('h1#primary', {
                'color': 'red',
            }),
        ))
        self.assertIsInstance(properties, Properties)
        self.assertEqual(properties, {
            'font-weight': 'bold',
            'color': 'red',
        })
class PropertiesTestCase(TestCase):
    """Tests for the Properties mapping and its string (de)serialization."""

    def test_serializes_to_attribute_string(self):
        properties = Properties({
            'font-weight': 'bold',
            'color': 'red',
        })
        # XXX: Ordering is non-deterministic, so we have to check both variations.
        expected = set((
            'font-weight: bold; color: red',
            'color: red; font-weight: bold',
        ))
        self.assertIn(u'%s' % (properties,), expected)

    def test_from_string(self):
        properties = Properties.from_string('color: red; font-weight: bold')
        self.assertEqual(properties, {
            'color': 'red',
            'font-weight': 'bold',
        })

    def test_from_string_cleans_whitespace(self):
        # Stray spaces/newlines around names and values are stripped.
        properties = Properties.from_string('color : red;\nfont-weight: bold ;')
        self.assertEqual(properties, {
            'color': 'red',
            'font-weight': 'bold',
        })
class InlineTestCase(TestCase):
    """End-to-end tests for inline() on parsed HTML documents."""

    def test_inlines_styles(self):
        # A stylesheet rule becomes the element's style attribute.
        tree = html.document_fromstring("""
            <html>
            <head>
                <style type="text/css">
                    h1 { color: red; }
                </style>
            </head>
            <body>
                <h1>Hello, world.</h1>
            </body>
            </html>
        """)
        inline(tree)
        heading, = tree.cssselect('h1')
        self.assertEqual(heading.attrib['style'], 'color: red')

    def test_does_not_override_inlined_styles(self):
        # Existing inline styles win over stylesheet rules; non-conflicting
        # stylesheet properties are still merged in.
        tree = html.document_fromstring("""
            <html>
            <head>
                <style type="text/css">
                    h1 {
                        color: red;
                        display: block;
                    }
                </style>
            </head>
            <body>
                <h1 style="color: blue; font-weight: bold">Hello, world.</h1>
            </body>
            </html>
        """)
        inline(tree)
        heading, = tree.cssselect('h1')
        properties = Properties.from_string(heading.attrib['style'])
        self.assertEqual(properties, {
            'color': 'blue',
            'display': 'block',
            'font-weight': 'bold',
        })

    def test_removes_compiled_styles(self):
        # The <style> element is removed once its rules are inlined.
        tree = html.document_fromstring("""
            <html>
            <head>
                <style type="text/css">
                    h1 { font-weight: bold; }
                </style>
            </head>
            <body>
                <h1>Hello, world.</h1>
            </body>
            </html>
        """)
        inline(tree)
        heading, = tree.cssselect('h1')
        self.assertEqual(heading.attrib['style'], 'font-weight: bold')
        self.assertEqual(len(tree.cssselect('style')), 0)

    def test_skips_inline_false(self):
        # inline="false" stylesheets are kept as-is (minus the marker).
        tree = html.document_fromstring("""
            <html>
            <head>
                <style type="text/css">
                    h1 { font-weight: bold; }
                </style>
                <style type="text/css" inline="false">
                    h1 { color: red; }
                </style>
            </head>
            <body>
                <h1>Hello, world.</h1>
            </body>
            </html>
        """)
        inline(tree)
        heading, = tree.cssselect('h1')
        self.assertEqual(heading.attrib['style'], 'font-weight: bold')
        stylesheet, = tree.cssselect('style')
        self.assertNotIn('inline', stylesheet.attrib)
class ParserTestCase(TestCase):
    """inline() should work regardless of which lxml parser built the tree."""

    # Shared document with one rule to be inlined.
    document = """
        <html>
        <head>
            <style type="text/css">
                h1 { color: red; }
            </style>
        </head>
        <body>
            <h1>Hello, world.</h1>
        </body>
        </html>
    """

    def assertInlines(self, tree):
        # Shared assertion: the h1 ends up with the inlined color.
        inline(tree)
        heading, = CSSSelector('h1')(tree)
        self.assertEqual(heading.attrib['style'], 'color: red')

    def test_etree(self):
        tree = etree.fromstring(self.document)
        self.assertInlines(tree)

    def test_html(self):
        tree = html.document_fromstring(self.document)
        self.assertInlines(tree)

    @pytest.mark.skipif(soupparser is None,
                        reason='BeautifulSoup is not installed')
    def test_beautifulsoup(self):
        tree = soupparser.fromstring(self.document)
        self.assertInlines(tree)

    def test_from_string(self):
        # from_string round-trips: serialize, reparse, check.
        result = from_string(self.document)
        tree = etree.fromstring(result)
        self.assertInlines(tree)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\nimport pygame\r\n\r\npygame.init()\r\n\r\ndisplay_width = 800\r\ndisplay_height = 600\r\n\r\ngameDisplay = pygame.display.set_mode((display_width,display_height))\r\npygame.display.set_caption('PyPet')\r\n\r\nblack = (0,0,0)\r\nwhite = (255,255... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nimport pygame\r\n\r\npygame.init()\r\n\r\ndisplay_width = 800\r\ndisplay_height = 600\r\n\r\ngameDisplay = pygame.display.set_mode((display_width,display_height))\r\npygame.display.set_caption('PyPet')\r\n\r\nblack = (0,0,0)\r\n... | ```python
import pygame
import os

pygame.init()

# Window dimensions
display_width = 800
display_height = 600

gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('PyPet')

# Basic colours (RGB)
black = (0, 0, 0)
white = (255, 255, 255)

clock = pygame.time.Clock()
crashed = False

# BUG FIX: the original used 'resources\image\cat.png' — "\i" is an
# invalid escape sequence and backslashes are not portable.  Build the
# path in an OS-independent way instead.
catImg = pygame.image.load(os.path.join('resources', 'image', 'cat.png'))


def cat(x, y):
    """Draw the cat sprite with its top-left corner at (x, y)."""
    gameDisplay.blit(catImg, (x, y))


# Initial sprite position and horizontal velocity
x = (display_width * 0.45)
y = (display_height * 0.8)
x_change = 0

# Main loop: handle input, move the sprite, redraw at 60 FPS.
while not crashed:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            crashed = True
        # Arrow keys set the horizontal velocity...
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                x_change = -5
            elif event.key == pygame.K_RIGHT:
                x_change = 5
        # ...and releasing either arrow stops the sprite.
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                x_change = 0

    x += x_change

    gameDisplay.fill(white)
    cat(x, y)
    pygame.display.update()
    clock.tick(60)  # cap at 60 FPS

pygame.quit()
quit()
``` |
[
{
"content": "```python\n#!/usr/bin/python\n#\n# Fishfeedtime powers down Powerheads/Filters/Pumps of your choice (which are connected to 433.92MHz power socket controllers)\n# for a set duration.\n#\n\n## GPIO Pin Definitions\n# Pin with button circuit GPIO Pullup\nBUTTON = 25\n# LED Pin if present...\nLED = T... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/python\n#\n# Fishfeedtime powers down Powerheads/Filters/Pumps of your choice (which are connected to 433.92MHz power socket controllers)\n# for a set duration.\n#\n\n## GPIO Pin Definitions\n# Pin with button circuit GPIO Pullup\nBUTTON = 25\n# LED Pin if pre... | ```python
#!/usr/bin/python
#
# Fishfeedtime powers down Powerheads/Filters/Pumps of your choice (which are connected to 433.92MHz power socket controllers)
# for a set duration.
#
## GPIO Pin Definitions
# Pin with button circuit GPIO Pullup
BUTTON = 25
# LED Pin if present...
LED = True
LEDPIN = 11
# Pin connect to 434MHz transmit Data Line. -- NOT CONFIGURABLE, connect to *** PIN26 // GPIO7 ***
##TRANS = 7
# Seconds to turn everything off for on button push: ## 900s = 15mins
OFFTIME = 900
# Time (s) before OFFTIME to sound alert.
WARNTIME = 60
# Do we have a buzzer here?
BUZZER = True
# If "True" - which GPIO Pin to use it?
BUZPIN = 8
# Where can the https://github.com/dmcg/raspberry-strogonanoff script be found?
switchscript = "~fishfeedtime/fishfeedtime/switch"
# List of dictionaries detailing channel and buttons for the desired controlled sockets:
## Socket 1 controls the two Powerheads.
## Socket 2 is the Canister Filter
## Socket 3 is Skimmer
SOCKETS = [ {"socket": "1"}, {"socket": "2"}, {"socket": "3"} ]
# Set verbosity ## True//False
VERBOSE = False
## Import needed modules
import time, os, sys
import RPi.GPIO as GPIO
## Detect early on that we're running with root permissions!
if not os.geteuid() == 0:
sys.exit('Script must run as root')
# Setup GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if BUZZER == True:
GPIO.setup(BUZPIN, GPIO.OUT, False)
if LED == True:
GPIO.setup(LEDPIN, GPIO.OUT, False)
GPIO.setup(BUTTON, GPIO.IN)
def sockets(state):
    """Switch every configured socket to `state` ("on" or "off").

    Invokes the strogonanoff switch script once per entry in SOCKETS.

    Raises:
        ValueError: if `state` is neither "on" nor "off".
    """
    if state == "on" or state == "off":
        if VERBOSE == True:
            print("Switching Sockets " + str(state))
        for a in SOCKETS:
            sock = str(a['socket'])
            # NOTE: command built by string concatenation and run via
            # os.system; values come from local config, but keep them
            # free of shell metacharacters.
            switchcmd = str(switchscript) + " " + str(sock) + " " + str(state)
            if VERBOSE == True:
                print(switchcmd)
            os.system(switchcmd)
    else:
        print("Invalid state sent to sockets(): " + str(state))
        # BUG FIX: a bare `raise` outside an except block raises
        # "RuntimeError: No active exception to re-raise"; raise an
        # explicit ValueError instead.
        raise ValueError("Invalid state sent to sockets(): " + str(state))
## Put sockets on initially (Useful if Pi init script is restarted with
## sockets in an off state.
print("Initialising: ensuring sockets are ON...")
sockets("on")
def button_state():
    """Report the feed button as "Open" (not pressed) or "Closed"."""
    # The input is pulled up, so a truthy read means the circuit is open.
    pressed = not GPIO.input(BUTTON)
    return "Closed" if pressed else "Open"
def ledswitch(ledstate):
    """Drive the indicator LED; no-op (optionally logged) if unconfigured."""
    if LED == True:
        if ledstate == "on":
            GPIO.output(LEDPIN, True)
        elif ledstate == "off":
            GPIO.output(LEDPIN, False)
        else:
            print("Invalid state passed to ledswitch: " + str(ledstate))
            # NOTE(review): bare `raise` with no active exception raises
            # RuntimeError — probably meant to be an explicit ValueError.
            raise
    else:
        if VERBOSE == True:
            print("LED not configured.")
def sound_buzzer():
    """Emit a short 0.1s beep if a buzzer is configured."""
    if BUZZER == True:
        # Make some noise!
        if VERBOSE == True:
            print("Beep")
        GPIO.output(BUZPIN, True)
        time.sleep(0.1)
        GPIO.output(BUZPIN, False)
    else:
        if VERBOSE == True:
            print("Buzzer not configured.")
def run_timer():
    """Run one feeding cycle: LED + beep, sockets off, timed wait, restore.

    Waits roughly OFFTIME seconds total (OFFTIME/WARNTIME defined earlier in
    the file), triple-beeping WARNTIME seconds before the end; a second button
    press aborts the wait early. Sockets always come back on at the end.
    """
    print("Feed Time Started")
    ledswitch("on")
    sound_buzzer()
    sockets("off")
    start = time.time()
    # Insert a small time delay to ensure that devices are not immediately switched back on if button is held down.
    delay = 2
    time.sleep(delay)
    fulltime = int(OFFTIME - delay)
    warntime = int(fulltime - WARNTIME)
    warned = False
    count = 0
    # Edge detection lets a second button press cut the feed cycle short.
    GPIO.add_event_detect(BUTTON, GPIO.RISING, bouncetime=200)
    while time.time() - start < fulltime:
        if VERBOSE == True:
            print("Count: " + str(count))
            count = count + 1
        if GPIO.event_detected(BUTTON):
            if VERBOSE == True:
                print("Button Push Override Detected.")
            GPIO.remove_event_detect(BUTTON)
            break
        if time.time() - start > warntime and warned == False:
            if VERBOSE == True:
                print("Warning Time Reached.")
            # Triple beep warns that the sockets are about to come back on.
            sound_buzzer()
            time.sleep(0.3)
            sound_buzzer()
            time.sleep(0.3)
            sound_buzzer()
            warned = True
        else:
            time.sleep(1)
    print("Feed Time Ended.")
    # Safe even after the break path above already removed the detector.
    GPIO.remove_event_detect(BUTTON)
    sound_buzzer()
    sockets("on")
    ledswitch("off")
## Actual run
# Check switchcode exists:
### NEEDS FIXING — os.path.isfile does not expand "~user" paths; use
### os.path.expanduser first before re-enabling this guard.
#if not os.path.isfile(switchscript):
#    print("Failed to locate " + str(switchscript))
#    sys.exit(1)
try:
    # Block forever: each button press triggers one feeding cycle.
    while True:
        print("Listening for feeding time...")
        #GPIO.add_event_detect(BUTTON, GPIO.RISING, bouncetime=200)
        GPIO.wait_for_edge(BUTTON, GPIO.RISING)
        GPIO.remove_event_detect(BUTTON)
        run_timer()
except KeyboardInterrupt:
    # On Ctrl-C, make sure the equipment is powered before exiting.
    print("\n\nKeyboard Interrupt. Ensuring sockets are Enabled...")
    sound_buzzer()
    time.sleep(0.2)
    sound_buzzer()
    sockets("on")
    ledswitch("off")
    GPIO.cleanup()
    print("Exiting...")
    sys.exit(0)
``` |
[
{
"content": "Repeat the full code snippet:\n```python\nimport argparse\nimport os\nimport re\nimport sys\n\nfrom django.core.management import base\n\n\nclass Command(base.BaseCommand):\n help = \\\n \"\"\"\n Clears .pyc files from the project.\n \"\"\"\n\n pattern = r'^.+\\.pyc$'\n\... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\nimport argparse\nimport os\nimport re\nimport sys\n\nfrom django.core.management import base\n\n\nclass Command(base.BaseCommand):\n help = \\\n \"\"\"\n Clears .pyc files from the project.\n \"\"\"\n\n pattern = ... | ```python
import argparse
import os
import re
import sys
from django.core.management import base
class Command(base.BaseCommand):
    # Management command that walks directories and deletes *.pyc files,
    # optionally prompting before each removal.
    help = \
    """
    Clears .pyc files from the project.
    """
    # Matched against the FULL path of each file found by os.walk.
    pattern = r'^.+\.pyc$'
    def add_arguments(self, parser):
        parser.add_argument(
            '--noinput', dest='noinput', action='store_true', default=False,
            help="Do NOT prompt the user for input of any kind."
        )
        parser.add_argument(
            '-f', '--force', dest='force', action='store_true', default=False,
            help="Force the removing files without user interaction."
        )
        parser.add_argument(
            '-p', '--with-pythonpath', dest='with_pythonpath', action='store_true', default=False,
            help="Remove also PYTHONPATH libraries."
        )
        parser.add_argument(
            'path', nargs=argparse.REMAINDER,
            help="Directories with libraries"
        )
    def handle(self, *args, **options):
        # Default to the script's own directory (sys.path[0]) when no
        # explicit paths were given.
        dirs = options['path'] or sys.path[:1]
        if options['with_pythonpath']:
            dirs += sys.path[1:]
        for d in dirs:
            d = d or '.'
            # Only descend into writable directories; everything else is
            # reported as skipped at verbosity >= 2.
            if os.path.isdir(d) and os.access(d, os.W_OK):
                for dirname, _, filenames in os.walk(d):
                    for filename in filenames:
                        fullname = os.path.join(dirname, filename)
                        if re.search(self.pattern, fullname):
                            # Prompt per file unless --force or --noinput.
                            if not options['force'] and not options['noinput']:
                                confirm_action = input(
                                    "Do you want to delete '%s'? [y/N] " % fullname)
                                if confirm_action != 'y':
                                    continue
                            os.remove(fullname)
                            if int(options['verbosity']) >= 2:
                                self.stdout.write("Removed %s" % fullname)
            else:
                if int(options['verbosity']) >= 2:
                    self.stdout.write("Skipped %s" % d)
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\n\n\ndef identity_function(x):\n return x\n\n\ndef step_function(x):\n return np.array(x > 0, dtype=np.int)\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef sigmoid_grad(x):\n return (1.0 - sigmoid(x)) *... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\n\n\ndef identity_function(x):\n return x\n\n\ndef step_function(x):\n return np.array(x > 0, dtype=np.int)\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef sigmoid_grad(x):\n return (1.0... | ```python
#!/usr/bin/env python
# coding: utf-8
import numpy as np
def identity_function(x):
    """Identity activation: pass the input through unchanged."""
    return x
def step_function(x):
    """Heaviside step activation: 1 where x > 0, else 0 (elementwise).

    Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``int`` produces the same integer dtype.
    """
    return np.array(x > 0, dtype=int)
def sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_grad(x):
    """Derivative of the sigmoid, expressed via sigmoid itself."""
    s = sigmoid(x)
    return (1.0 - s) * s
def relu(x):
    """Rectified linear unit: elementwise max(0, x)."""
    return np.maximum(x, 0)
def relu_grad(x):
    """Gradient of ReLU: 1 where x >= 0, else 0 (elementwise).

    Bug fix: the original called ``np.zeros(x)``, which interprets an array
    argument as a *shape* (and raises TypeError for float arrays);
    ``np.zeros_like`` allocates a zero array matching x's shape and dtype.
    """
    grad = np.zeros_like(x)
    grad[x >= 0] = 1
    return grad
def softmax(x):
    """Softmax over a 1-D vector, or row-wise over a 2-D batch.

    Shifts by the maximum before exponentiating to avoid overflow.
    """
    if x.ndim == 2:
        cols = x.T
        cols = cols - np.max(cols, axis=0)
        exps = np.exp(cols)
        return (exps / np.sum(exps, axis=0)).T
    shifted = x - np.max(x)  # overflow guard
    exps = np.exp(shifted)
    return exps / np.sum(exps)
def mean_squared_error(y, t):
    """Half the sum of squared differences between prediction y and target t."""
    diff = y - t
    return 0.5 * np.sum(diff * diff)
def cross_entropy_error(y, t):
    """Mean cross-entropy loss over a batch.

    y: predicted probabilities, shape (N, C) or (C,) for a single sample.
    t: targets, either one-hot (same size as y) or integer class indices.
    """
    if y.ndim == 1:
        # Promote a single sample to a batch of one so the fancy indexing
        # below works uniformly.
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    # If the targets are one-hot vectors, convert them to class-index labels.
    if t.size == y.size:
        t = t.argmax(axis=1)
    batch_size = y.shape[0]
    # Pick the predicted probability of the true class for each sample.
    return -np.sum(np.log(y[np.arange(batch_size), t])) / batch_size
def softmax_loss(X, t):
    """Softmax activation followed by cross-entropy loss against targets t."""
    return cross_entropy_error(softmax(X), t)
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#... | ```python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with Stackdriver Logging via JSON-over-HTTP."""
import functools
from google.cloud import _http
from google.cloud.iterator import HTTPIterator
from google.cloud.logging import __version__
from google.cloud.logging._helpers import entry_from_resource
from google.cloud.logging.sink import Sink
from google.cloud.logging.metric import Metric
# User-agent-style header value identifying this client library version.
_CLIENT_INFO = _http.CLIENT_INFO_TEMPLATE.format(__version__)
class Connection(_http.JSONConnection):
    """A connection to Google Stackdriver Logging via the JSON REST API.
    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client that owns the current connection.
    """
    API_BASE_URL = 'https://logging.googleapis.com'
    """The base of the API call URL."""
    API_VERSION = 'v2'
    """The version of the API, used in building the API call's URL."""
    API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}'
    """A template for the URL of a particular API call."""
    # Sent with every request so the backend can attribute client traffic.
    _EXTRA_HEADERS = {
        _http.CLIENT_INFO_HEADER: _CLIENT_INFO,
    }
class _LoggingAPI(object):
    """Helper mapping logging-related APIs.
    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs
    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client used to make API requests.
    """
    def __init__(self, client):
        self._client = client
        # Cache the connection's request callable for direct use below.
        self.api_request = client._connection.api_request
    def list_entries(self, projects, filter_=None, order_by=None,
                     page_size=None, page_token=None):
        """Return a page of log entry resources.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list
        :type projects: list of strings
        :param projects: project IDs to include. If not passed,
                         defaults to the project bound to the client.
        :type filter_: str
        :param filter_:
            a filter expression. See
            https://cloud.google.com/logging/docs/view/advanced_filters
        :type order_by: str
        :param order_by: One of :data:`~google.cloud.logging.ASCENDING`
                         or :data:`~google.cloud.logging.DESCENDING`.
        :type page_size: int
        :param page_size: maximum number of entries to return, If not passed,
                          defaults to a value set by the API.
        :type page_token: str
        :param page_token: opaque marker for the next "page" of entries. If not
                           passed, the API will return the first page of
                           entries.
        :rtype: :class:`~google.cloud.iterator.Iterator`
        :returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry`
                  accessible to the current API.
        """
        extra_params = {'projectIds': projects}
        # Only send optional parameters the caller actually supplied.
        if filter_ is not None:
            extra_params['filter'] = filter_
        if order_by is not None:
            extra_params['orderBy'] = order_by
        if page_size is not None:
            extra_params['pageSize'] = page_size
        path = '/entries:list'
        # We attach a mutable loggers dictionary so that as Logger
        # objects are created by entry_from_resource, they can be
        # re-used by other log entries from the same logger.
        loggers = {}
        item_to_value = functools.partial(
            _item_to_entry, loggers=loggers)
        iterator = HTTPIterator(
            client=self._client, path=path,
            item_to_value=item_to_value, items_key='entries',
            page_token=page_token, extra_params=extra_params)
        # This method uses POST to make a read-only request.
        iterator._HTTP_METHOD = 'POST'
        return iterator
    def write_entries(self, entries, logger_name=None, resource=None,
                      labels=None):
        """API call: log an entry resource via a POST request
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write
        :type entries: sequence of mapping
        :param entries: the log entry resources to log.
        :type logger_name: str
        :param logger_name: name of default logger to which to log the entries;
                            individual entries may override.
        :type resource: mapping
        :param resource: default resource to associate with entries;
                         individual entries may override.
        :type labels: mapping
        :param labels: default labels to associate with entries;
                       individual entries may override.
        """
        # Defaults apply request-wide; entries may carry their own overrides.
        data = {'entries': list(entries)}
        if logger_name is not None:
            data['logName'] = logger_name
        if resource is not None:
            data['resource'] = resource
        if labels is not None:
            data['labels'] = labels
        self.api_request(method='POST', path='/entries:write', data=data)
    def logger_delete(self, project, logger_name):
        """API call: delete all entries in a logger via a DELETE request
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete
        :type project: str
        :param project: ID of project containing the log entries to delete
        :type logger_name: str
        :param logger_name: name of logger containing the log entries to delete
        """
        path = '/projects/%s/logs/%s' % (project, logger_name)
        self.api_request(method='DELETE', path=path)
class _SinksAPI(object):
    """Helper mapping sink-related APIs.
    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks
    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client used to make API requests.
    """
    def __init__(self, client):
        self._client = client
        # Cache the connection's request callable for direct use below.
        self.api_request = client._connection.api_request
    def list_sinks(self, project, page_size=None, page_token=None):
        """List sinks for the project associated with this client.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list
        :type project: str
        :param project: ID of the project whose sinks are to be listed.
        :type page_size: int
        :param page_size: maximum number of sinks to return, If not passed,
                          defaults to a value set by the API.
        :type page_token: str
        :param page_token: opaque marker for the next "page" of sinks. If not
                           passed, the API will return the first page of
                           sinks.
        :rtype: :class:`~google.cloud.iterator.Iterator`
        :returns: Iterator of
                  :class:`~google.cloud.logging.sink.Sink`
                  accessible to the current API.
        """
        extra_params = {}
        if page_size is not None:
            extra_params['pageSize'] = page_size
        path = '/projects/%s/sinks' % (project,)
        return HTTPIterator(
            client=self._client, path=path,
            item_to_value=_item_to_sink, items_key='sinks',
            page_token=page_token, extra_params=extra_params)
    def sink_create(self, project, sink_name, filter_, destination):
        """API call: create a sink resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create
        :type project: str
        :param project: ID of the project in which to create the sink.
        :type sink_name: str
        :param sink_name: the name of the sink
        :type filter_: str
        :param filter_: the advanced logs filter expression defining the
                        entries exported by the sink.
        :type destination: str
        :param destination: destination URI for the entries exported by
                            the sink.
        """
        target = '/projects/%s/sinks' % (project,)
        data = {
            'name': sink_name,
            'filter': filter_,
            'destination': destination,
        }
        # Response body is discarded; callers re-fetch via sink_get if needed.
        self.api_request(method='POST', path=target, data=data)
    def sink_get(self, project, sink_name):
        """API call: retrieve a sink resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get
        :type project: str
        :param project: ID of the project containing the sink.
        :type sink_name: str
        :param sink_name: the name of the sink
        :rtype: dict
        :returns: The JSON sink object returned from the API.
        """
        target = '/projects/%s/sinks/%s' % (project, sink_name)
        return self.api_request(method='GET', path=target)
    def sink_update(self, project, sink_name, filter_, destination):
        """API call: update a sink resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
        :type project: str
        :param project: ID of the project containing the sink.
        :type sink_name: str
        :param sink_name: the name of the sink
        :type filter_: str
        :param filter_: the advanced logs filter expression defining the
                        entries exported by the sink.
        :type destination: str
        :param destination: destination URI for the entries exported by
                            the sink.
        :rtype: dict
        :returns: The returned (updated) resource.
        """
        target = '/projects/%s/sinks/%s' % (project, sink_name)
        # PUT replaces the full resource, so every mutable field is sent.
        data = {
            'name': sink_name,
            'filter': filter_,
            'destination': destination,
        }
        return self.api_request(method='PUT', path=target, data=data)
    def sink_delete(self, project, sink_name):
        """API call: delete a sink resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete
        :type project: str
        :param project: ID of the project containing the sink.
        :type sink_name: str
        :param sink_name: the name of the sink
        """
        target = '/projects/%s/sinks/%s' % (project, sink_name)
        self.api_request(method='DELETE', path=target)
class _MetricsAPI(object):
    """Helper mapping sink-related APIs.
    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics
    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client used to make API requests.
    """
    def __init__(self, client):
        self._client = client
        # Cache the connection's request callable for direct use below.
        self.api_request = client._connection.api_request
    def list_metrics(self, project, page_size=None, page_token=None):
        """List metrics for the project associated with this client.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list
        :type project: str
        :param project: ID of the project whose metrics are to be listed.
        :type page_size: int
        :param page_size: maximum number of metrics to return, If not passed,
                          defaults to a value set by the API.
        :type page_token: str
        :param page_token: opaque marker for the next "page" of metrics. If not
                           passed, the API will return the first page of
                           metrics.
        :rtype: :class:`~google.cloud.iterator.Iterator`
        :returns: Iterator of
                  :class:`~google.cloud.logging.metric.Metric`
                  accessible to the current API.
        """
        extra_params = {}
        if page_size is not None:
            extra_params['pageSize'] = page_size
        path = '/projects/%s/metrics' % (project,)
        return HTTPIterator(
            client=self._client, path=path,
            item_to_value=_item_to_metric, items_key='metrics',
            page_token=page_token, extra_params=extra_params)
    def metric_create(self, project, metric_name, filter_, description=None):
        """API call: create a metric resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create
        :type project: str
        :param project: ID of the project in which to create the metric.
        :type metric_name: str
        :param metric_name: the name of the metric
        :type filter_: str
        :param filter_: the advanced logs filter expression defining the
                        entries exported by the metric.
        :type description: str
        :param description: description of the metric.
        """
        target = '/projects/%s/metrics' % (project,)
        data = {
            'name': metric_name,
            'filter': filter_,
            'description': description,
        }
        # Response body is discarded; callers re-fetch via metric_get if needed.
        self.api_request(method='POST', path=target, data=data)
    def metric_get(self, project, metric_name):
        """API call: retrieve a metric resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/get
        :type project: str
        :param project: ID of the project containing the metric.
        :type metric_name: str
        :param metric_name: the name of the metric
        :rtype: dict
        :returns: The JSON metric object returned from the API.
        """
        target = '/projects/%s/metrics/%s' % (project, metric_name)
        return self.api_request(method='GET', path=target)
    def metric_update(self, project, metric_name, filter_, description):
        """API call: update a metric resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/update
        :type project: str
        :param project: ID of the project containing the metric.
        :type metric_name: str
        :param metric_name: the name of the metric
        :type filter_: str
        :param filter_: the advanced logs filter expression defining the
                        entries exported by the metric.
        :type description: str
        :param description: description of the metric.
        :rtype: dict
        :returns: The returned (updated) resource.
        """
        target = '/projects/%s/metrics/%s' % (project, metric_name)
        # PUT replaces the full resource, so every mutable field is sent.
        data = {
            'name': metric_name,
            'filter': filter_,
            'description': description,
        }
        return self.api_request(method='PUT', path=target, data=data)
    def metric_delete(self, project, metric_name):
        """API call: delete a metric resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/delete
        :type project: str
        :param project: ID of the project containing the metric.
        :type metric_name: str
        :param metric_name: the name of the metric.
        """
        target = '/projects/%s/metrics/%s' % (project, metric_name)
        self.api_request(method='DELETE', path=target)
def _item_to_entry(iterator, resource, loggers):
    """Convert a log entry resource to the native object.
    .. note::
        This method does not have the correct signature to be used as
        the ``item_to_value`` argument to
        :class:`~google.cloud.iterator.Iterator`. It is intended to be
        patched with a mutable ``loggers`` argument that can be updated
        on subsequent calls. For an example, see how the method is
        used above in :meth:`_LoggingAPI.list_entries`.
    :type iterator: :class:`~google.cloud.iterator.Iterator`
    :param iterator: The iterator that is currently in use.
    :type resource: dict
    :param resource: Log entry JSON resource returned from the API.
    :type loggers: dict
    :param loggers:
        A mapping of logger fullnames -> loggers. If the logger
        that owns the entry is not in ``loggers``, the entry
        will have a newly-created logger.
    :rtype: :class:`~google.cloud.logging.entries._BaseEntry`
    :returns: The next log entry in the page.
    """
    # Resource parsing (and logger caching/reuse) lives in the shared helper.
    return entry_from_resource(resource, iterator.client, loggers)
def _item_to_sink(iterator, resource):
    """Convert a sink resource to the native object.
    :type iterator: :class:`~google.cloud.iterator.Iterator`
    :param iterator: The iterator that is currently in use.
    :type resource: dict
    :param resource: Sink JSON resource returned from the API.
    :rtype: :class:`~google.cloud.logging.sink.Sink`
    :returns: The next sink in the page.
    """
    # Bind the parsed sink to the same client that produced the iterator.
    return Sink.from_api_repr(resource, iterator.client)
def _item_to_metric(iterator, resource):
    """Convert a metric resource to the native object.
    :type iterator: :class:`~google.cloud.iterator.Iterator`
    :param iterator: The iterator that is currently in use.
    :type resource: dict
    :param resource: Metric JSON resource returned from the API.
    :rtype: :class:`~google.cloud.logging.metric.Metric`
    :returns: The next metric in the page.
    """
    # Bind the parsed metric to the same client that produced the iterator.
    return Metric.from_api_repr(resource, iterator.client)
``` |
[
{
"content": "Here is some code:\n```python\n#!/usr/bin/env python\n# Test this by entering the search string \"election\" on a command line like this:\n# /home/wevote/WeVoteServer/search/query_test_script.py election\n\nfrom elasticsearch import Elasticsearch\nimport sys\n\nes = Elasticsearch([\"172.31.24.246:... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n#!/usr/bin/env python\n# Test this by entering the search string \"election\" on a command line like this:\n# /home/wevote/WeVoteServer/search/query_test_script.py election\n\nfrom elasticsearch import Elasticsearch\nimport sys\n\nes = Elasticsearch([... | ```python
#!/usr/bin/env python
# Test this by entering the search string "election" on a command line like this:
# /home/wevote/WeVoteServer/search/query_test_script.py election
# NOTE: Python 2 script (print statements) — do not run under Python 3.
from elasticsearch import Elasticsearch
import sys
# Long timeout/retry settings: the ES node is remote and may be slow to answer.
es = Elasticsearch(["172.31.24.246:9200"], timeout = 120, max_retries = 5, retry_on_timeout = True)
if len(sys.argv) < 2:
    print "Usage: %s <search term>" % (sys.argv[0])
    sys.exit(-1)
search_term = sys.argv[1]
# Earlier experiments, kept for reference:
#query = { "query": {"match": { "candidate_name": "Joe"}}}
#query = { "query": {"match": { "candidate_name": "Joe"}}}
#query = { "query": { "multi_match": { "type": "phrase_prefix", "query": search_term, "fields": [ "candidate_name", "candidate_twitter_handle", "twitter_name", "measure_subtitle", "measure_text", "measure_title", "office_name", "first_name", "middle_name", "last_name", "party", "organization_name", "organization_twitter_handle", "twitter_description" ] } }}
# Baseline query: phrase-prefix match across all searchable fields, no sorting.
query = { "query": { "multi_match": { "type": "phrase_prefix", "query": search_term, "fields": [ "google_civic_election_id", "candidate_name", "candidate_twitter_handle", "election_name", "twitter_name", "measure_subtitle", "measure_text", "measure_title", "office_name", "party", "organization_name", "organization_twitter_handle", "twitter_description" ] } }}
# Variant: boost election_name (^3) and sort newest election first, then score.
query_with_election_date = { "query": { "multi_match": { "type": "phrase_prefix",
                                                         "query": search_term,
                                                         "fields": [ "election_name^3", "google_civic_election_id",
                                                                     "candidate_name",
                                                                     "candidate_twitter_handle", "election_name",
                                                                     "twitter_name", "measure_subtitle", "measure_text",
                                                                     "measure_title", "office_name", "party",
                                                                     "organization_name", "organization_twitter_handle",
                                                                     "twitter_description", "state_name"],
                                                         "slop": 5}},
                             "sort": [{"election_day_text": {"order": "desc"}},
                                      {"_score": {"order": "desc"}}]}
# Variant: docs without election_day_text sort last instead of erroring.
query_with_missing_last_election_date = { "query": { "multi_match": { "type": "phrase_prefix",
                                                         "query": search_term,
                                                         "fields": [ "election_name^3", "google_civic_election_id",
                                                                     "candidate_name",
                                                                     "candidate_twitter_handle", "election_name",
                                                                     "twitter_name", "measure_subtitle", "measure_text",
                                                                     "measure_title", "office_name", "party",
                                                                     "organization_name", "organization_twitter_handle",
                                                                     "twitter_description", "state_name"],
                                                         "slop": 5}},
                             "sort": [{"election_day_text": {"missing": "_last", "order": "desc"}},
                                      {"_score": {"order": "desc"}}]}
# Variant: substitute a sentinel date for missing values, no explicit order.
query_with_missing_election_date_without_order = { "query": { "multi_match": { "type": "phrase_prefix",
                                                         "query": search_term,
                                                         "fields": [ "election_name^3", "google_civic_election_id",
                                                                     "candidate_name",
                                                                     "candidate_twitter_handle", "election_name",
                                                                     "twitter_name", "measure_subtitle", "measure_text",
                                                                     "measure_title", "office_name", "party",
                                                                     "organization_name", "organization_twitter_handle",
                                                                     "twitter_description", "state_name"],
                                                         "slop": 5}},
                             "sort": [{"election_day_text": {"missing": "1111-11-11"}},
                                      {"_score": {"order": "desc"}}]}
# Variant: sentinel date for missing values plus explicit descending order.
query_with_election_missing_date_value = { "query": { "multi_match": { "type": "phrase_prefix",
                                                         "query": search_term,
                                                         "fields": [ "election_name^3", "google_civic_election_id",
                                                                     "candidate_name",
                                                                     "candidate_twitter_handle", "election_name",
                                                                     "twitter_name", "measure_subtitle", "measure_text",
                                                                     "measure_title", "office_name", "party",
                                                                     "organization_name", "organization_twitter_handle",
                                                                     "twitter_description", "state_name"],
                                                         "slop": 5}},
                             "sort": [{"election_day_text": {"missing": "1111-11-11", "order": "desc"}},
                                      {"_score": {"order": "desc"}}]}
# Example of querying ALL indexes
res = es.search(body=query)
res_with_election_date = es.search(body=query_with_election_date)
res_with_missing_last_election_date = es.search(body=query_with_missing_last_election_date)
# res_with_missing_election_date_without_order = es.search(body=query_with_missing_election_date_without_order)
# res_with_election_missing_date_value = es.search(body=query_with_election_missing_date_value)
print "Got %d hits from all index search: " % res['hits']['total']
print "Got %d hits from all index search: " % res_with_election_date['hits']['total']
print "Got %d hits from all index search: " % res_with_missing_last_election_date['hits']['total']
# print "Got %d hits from all index search: " % res_with_missing_election_date_without_order['hits']['total']
# print "Got %d hits from all index search: " % res_with_election_missing_date_value['hits']['total']
# Dump every field of every hit for each variant, for manual comparison.
for hit in res['hits']['hits']:
    print "------------- RESULT --------------"
    for field in hit:
        print "%s: %s" % (field, hit[field])
print "============================================"
print "============================================"
for hit in res_with_election_date['hits']['hits']:
    print "------------- RESULT --------------"
    for field in hit:
        print "%s: %s" % (field, hit[field])
print "============================================"
print "============================================"
for hit in res_with_missing_last_election_date['hits']['hits']:
    print "------------- RESULT --------------"
    for field in hit:
        print "%s: %s" % (field, hit[field])
print "============================================"
# print "============================================"
# for hit in res_with_missing_election_date_without_order['hits']['hits']:
#     print "------------- RESULT --------------"
#     for field in hit:
#         print "%s: %s" % (field, hit[field])
# print "============================================"
# print "============================================"
# for hit in res_with_election_missing_date_value['hits']['hits']:
#     print "------------- RESULT --------------"
#     for field in hit:
#         print "%s: %s" % (field, hit[field])
# example of querying single index
if (True):
    res = es.search(index="elections", body={ "query": {"match": { "google_civic_election_id": "5000"}}})
    print "Got %d hits from single index search: " % res['hits']['total']
    for hit in res['hits']['hits']:
        for field in hit:
            print "%s: %s" % (field, hit[field])
``` |
[
{
"content": "```python\nimport asyncio, contextlib, io, inspect, sys, functools, subprocess, discord\nfrom discord.ext import commands\nfrom .utils import checks\n\n@contextlib.contextmanager\ndef stdoutIO(stdout=None):\n old = sys.stdout\n if stdout == None:\n stdout = io.StringIO... | [
{
"content": "<|memory_start|>```python\nimport asyncio, contextlib, io, inspect, sys, functools, subprocess, discord\nfrom discord.ext import commands\nfrom .utils import checks\n\n@contextlib.contextmanager\ndef stdoutIO(stdout=None):\n old = sys.stdout\n if stdout == None:\n stdo... | ```python
import asyncio, contextlib, io, inspect, sys, functools, subprocess, discord
from discord.ext import commands
from .utils import checks
@contextlib.contextmanager
def stdoutIO(stdout=None):
    """Temporarily redirect sys.stdout into a StringIO (or a given buffer).

    Yields the buffer so callers can read captured output afterwards.
    Bug fix: the original had no try/finally, so an exception raised inside
    the ``with`` body left sys.stdout permanently redirected.
    """
    old = sys.stdout
    if stdout is None:
        stdout = io.StringIO()
    sys.stdout = stdout
    try:
        yield stdout
    finally:
        # Always restore, even when the body raises.
        sys.stdout = old
class Debug:
    """Owner-only debugging cog: eval expressions, exec code, run shell commands."""
    def __init__(self, bot):
        self.bot = bot
    def clean(self, code):
        # Strip a surrounding ``` code fence (dropping the language line),
        # otherwise just trim stray backticks/whitespace.
        if code.startswith("```") and code.endswith("```"):
            return "\n".join(code.split("\n")[1:-1])
        return code.strip("` \n")
    @checks.is_owner()
    @commands.command(name='debug', pass_context=True)
    async def debug_statement(self, ctx, *, content : str):
        # NOTE: eval of arbitrary message content — gated by checks.is_owner().
        result = None
        code = self.clean(content)
        # Names made available to the evaluated expression.
        vals = dict(
            self=self,
            bot=self.bot,
            message=ctx.message,
            ctx=ctx,
            server=ctx.message.server,
            channel=ctx.message.channel,
            author=ctx.message.author,
            code=code,
            io=io,
            sys=sys,
            commands=commands,
            discord=discord
        )
        try:
            precompiled = compile(code, "eval.py", "eval")
            vals["compiled"] = precompiled
            result = eval(precompiled, vals)
        except Exception as e:
            await self.bot.say("<:vpRedTick:257437215615877129> `{}: {}\n`".format(type(e).__name__, e))
            return
        # Await coroutine results so `await`-style expressions work too.
        if inspect.isawaitable(result):
            result = await result
        if not result is None:
            result = str(result)
            # Truncate to stay under Discord's message length limit.
            await self.bot.say("<:vpGreenTick:257437292820561920> Input\n`{}`\n<:vpStreaming:212789640799846400> Output\n`{}`\n".format(content, result[:1800] + "..." if len(result) > 1800 else result))
    @checks.is_owner()
    @commands.command(name='terminal')
    async def terminal_command(self, *, command : str):
        # Run the shell command in an executor so the event loop is not blocked.
        # NOTE: shell=True with raw input — owner-only by decorator.
        result = await self.bot.loop.run_in_executor(None, functools.partial(subprocess.run, command, stdout=subprocess.PIPE, shell=True, universal_newlines=True))
        result = result.stdout
        await self.bot.say("<:vpGreenTick:257437292820561920> Input\n`{}`\n<:vpStreaming:212789640799846400> Output\n`{}`\n".format(command, result[:1800] + "..." if len(result) > 1800 else result))
    @checks.is_owner()
    @commands.command(name='run', pass_context=True)
    async def run_code(self, ctx, *, content : str):
        # NOTE: exec of arbitrary message content — gated by checks.is_owner().
        code = self.clean(content)
        # Wrap the snippet in a coroutine so it may use `await` inside.
        code = "async def coro():\n    " + "\n    ".join(code.split("\n"))
        vals = dict(
            self=self,
            bot=self.bot,
            message=ctx.message,
            ctx=ctx,
            server=ctx.message.server,
            channel=ctx.message.channel,
            author=ctx.message.author,
            io=io,
            code=code,
            sys=sys,
            commands=commands,
            discord=discord
        )
        # Capture anything the snippet prints.
        with stdoutIO() as s:
            try:
                precompiled = compile(code, "exec.py", "exec")
                vals["compiled"] = precompiled
                result = exec(precompiled, vals)
                await vals["coro"]()
            except Exception as e:
                await self.bot.say("<:vpRedTick:257437215615877129> `{}: {}\n`".format(type(e).__name__, e))
                return
        result = str(s.getvalue())
        if not result == "":
            await self.bot.say("<:vpGreenTick:257437292820561920> Input\n`{}`\n<:vpStreaming:212789640799846400> Output\n`{}`\n".format(content, result[:1800] + "..." if len(result) > 1800 else result))
def setup(bot):
    # Standard discord.py extension entry point: register the cog on the bot.
    bot.add_cog(Debug(bot))
``` |
[
{
"content": "```python\nimport os.path\n\nimport pygame.mixer\n\nimport data\n\nLOOP = -1\n\n\ndef load_music(music_ref):\n \"\"\"\n Charge une musique en mémoire mais ne la joue pas\n music_ref - La référence de la musique\n \"\"\"\n music_path = data.get_sound_path(\n os.path.join(\"mus... | [
{
"content": "<|memory_start|>```python\nimport os.path\n\nimport pygame.mixer\n\nimport data\n\nLOOP = -1\n\n\ndef load_music(music_ref):\n \"\"\"\n Charge une musique en mémoire mais ne la joue pas\n music_ref - La référence de la musique\n \"\"\"\n music_path = data.get_sound_path(\n os... | ```python
import os.path
import pygame.mixer
import data
# Loop count understood by pygame.mixer.music.play as "repeat forever".
LOOP = -1
def load_music(music_ref):
    """
    Load a music track into memory without playing it.
    music_ref - reference (base name) of the music file
    """
    music_path = data.get_sound_path(
        os.path.join("music", music_ref + ".ogg"))
    pygame.mixer.music.load(music_path)
def play_music(music_ref=None, loops=0, start=0.0):
    """
    Play the music `music_ref`, repeating it `loops` times,
    starting at second `start`.
    If loops == -1 the music is played indefinitely.
    If music_ref is falsy, replay the already-loaded track.
    """
    if music_ref:
        load_music(music_ref)
    pygame.mixer.music.play(loops, start)
def loop_music(music_ref=None):
    """
    Play the music `music_ref` in an endless loop.
    """
    play_music(music_ref, LOOP)
def stop_music(fadeout_time=0):
    """
    Stop the currently playing music.
    If fadeout_time > 0, the music fades out over `fadeout_time` ms.
    """
    if fadeout_time > 0:
        pygame.mixer.music.fadeout(fadeout_time)
    else:
        pygame.mixer.music.stop()
def toggle_music(fadeout_time=0):
    """
    Start the music if it is stopped, otherwise stop it
    (fading out over `fadeout_time` ms when stopping).
    """
    if pygame.mixer.music.get_busy():
        stop_music(fadeout_time)
    else:
        play_music()
def set_music_volume(volume):
"""
Defini le volume de la musique
"""
pygame.mixer.music.set_volume(volume)
def get_music_volume():
"""
Retourne le volume de la musique
"""
return pygame.mixer.music.get_volume()
def play_sound(sound_ref, loops=0, maxtime=0, fade_ms=0):
"""
Joue le son avec la référence `sound_ref` et le rejoue
`loops` fois
"""
sound_path = data.get_sound_path(
os.path.join("sounds", sound_ref + ".ogg"))
sound = pygame.mixer.Sound(sound_path)
pygame.mixer.find_channel().play(sound, loops, maxtime, fade_ms)
``` |
[
{
"content": "Repeat the code exactly:\n```python\nimport sys\n\ndef find_max_crossing_subarray(A, low, mid, high):\n\tleft_sum = -sys.maxint\n\tsum = 0\n\ti = mid\n\tmax_left = mid\n\twhile i >= low:\n\t\tsum += A[i]\n\t\tif sum > left_sum:\n\t\t\tmax_left = i\n\t\t\tleft_sum = sum\n\t\ti -= 1\n\n\tright_sum =... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\nimport sys\n\ndef find_max_crossing_subarray(A, low, mid, high):\n\tleft_sum = -sys.maxint\n\tsum = 0\n\ti = mid\n\tmax_left = mid\n\twhile i >= low:\n\t\tsum += A[i]\n\t\tif sum > left_sum:\n\t\t\tmax_left = i\n\t\t\tleft_sum = sum\n\t\ti -= 1\... | ```python
import sys
def find_max_crossing_subarray(A, low, mid, high):
left_sum = -sys.maxint
sum = 0
i = mid
max_left = mid
while i >= low:
sum += A[i]
if sum > left_sum:
max_left = i
left_sum = sum
i -= 1
right_sum = -sys.maxint
sum = 0
i = mid + 1
max_right = mid + 1
while i <= high:
sum += A[i]
if sum >= right_sum:
max_right = i
right_sum = sum
i += 1
return (max_left, max_right, left_sum+right_sum)
def find_maximum_subarray(A, low, high):
if low == high:
return (low, high, A[low])
else:
mid = (low + high) / 2
(left_low, left_high, left_sum) = find_maximum_subarray(A, low, mid)
(right_low, right_high, right_sum) = find_maximum_subarray(A, mid+1, high)
(cross_low, cross_high, cross_sum) = find_max_crossing_subarray(A, low, mid, high)
if left_sum >= right_sum and left_sum >= cross_sum:
return (left_low, left_high, left_sum)
elif right_sum >= left_sum and right_sum >= cross_sum:
return (right_low, right_high, right_sum)
else:
return (cross_low, cross_high, cross_sum)
A = [1, 2, 3, -100, 8, 9, 10, -100000, 2, 4, 500]
print find_maximum_subarray(A, 0, len(A)-1)
# A = [-8, -2, -4]
# print find_maximum_subarray(A, 0, len(A)-1)
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# -*- coding: utf-8 -*-\n# © 2016 Chafique DELLI @ Akretion\n# Copyright (C) 2016-Today: GRAP (http://www.grap.coop)\n# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# © 2016 Chafique DELLI @ Akretion\n# Copyright (C) 2016-Today: GRAP (http://www.grap.coop)\n# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agp... | ```python
# -*- coding: utf-8 -*-
# © 2016 Chafique DELLI @ Akretion
# Copyright (C) 2016-Today: GRAP (http://www.grap.coop)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import models, api, fields
import odoo.addons.decimal_precision as dp
class WizardUpdateInvoiceSupplierinfoLine(models.TransientModel):
_name = 'wizard.update.invoice.supplierinfo.line'
_SELECTION_STATE = [
('new_supplierinfo', 'New Supplier Info'),
('update_supplierinfo', 'Update Supplier Info'),
]
wizard_id = fields.Many2one(
comodel_name='wizard.update.invoice.supplierinfo', required=True,
ondelete='cascade')
product_id = fields.Many2one('product.product', string='Product')
supplierinfo_id = fields.Many2one(comodel_name='product.supplierinfo')
current_min_quantity = fields.Float(
related='supplierinfo_id.min_qty', readonly=True)
new_min_quantity = fields.Float(
string='New Min Quantity', required=True)
current_price = fields.Float(
related='supplierinfo_id.price',
digits=dp.get_precision('Product Price'),
readonly=True)
new_price = fields.Float(
string='New Unit Price', digits=dp.get_precision('Product Price'),
required=True)
price_variation = fields.Float(
string='Price Variation (%)', compute='_compute_price_variation',
digits=dp.get_precision('Discount'))
state = fields.Selection(selection=_SELECTION_STATE)
@api.depends('current_price', 'new_price')
@api.multi
def _compute_price_variation(self):
self.write({'price_variation': False})
for line in self.filtered('current_price'):
line.price_variation = 100 *\
(line.new_price - line.current_price) / line.current_price
# Custom Section
@api.multi
def _prepare_supplierinfo(self):
self.ensure_one()
return {
'product_tmpl_id': self.product_id.product_tmpl_id.id,
'name': self.wizard_id.invoice_id.supplier_partner_id.id,
'min_qty': 0.0,
'price': self.new_price,
'delay': 1,
}
``` |
[
{
"content": "```python\n#!/usr/bin/python\n#\n# linearize-data.py: Construct a linear, no-fork version of the chain.\n#\n# Copyright (c) 2013-2014 The Moneta developers\n# Distributed under the MIT/X11 software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/python\n#\n# linearize-data.py: Construct a linear, no-fork version of the chain.\n#\n# Copyright (c) 2013-2014 The Moneta developers\n# Distributed under the MIT/X11 software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/... | ```python
#!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Moneta developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import base64
import httplib
import sys
import hashlib
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
if not self.fileOutput and ((self.outsz + self.inLen) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(outFname, (int(time.time()), highTS))
self.outF = None
self.outFname = None
self.outFn = outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
lastDate = blkDate
if outF:
outF.close()
if setFileTime:
os.utime(outFname, (int(time.time()), highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
outFname = self.settings['output_file']
else:
outFname = "%s/blk%05d.dat" % (self.settings['output'], outFn)
print("Output file" + outFname)
self.outF = open(outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return "%s/blk%05d.dat" % (self.settings['input'], fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file" + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic:" + inMagic)
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
hash_str = calc_hash_str(blk_hdr)
if not hash_str in blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" in blkmap:
print("not found")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\nimport unittest\n\nfrom astropy import constants as astroconst\nfrom astropy import units as u\nfrom astropy.time import Time\n\nfrom CelestialMechanics.kepler import constants\nfrom CelestialMechanics.orbits import ellips... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\nimport unittest\n\nfrom astropy import constants as astroconst\nfrom astropy import units as u\nfrom astropy.time import Time\n\nfrom CelestialMechanics.kepler import constants\nfrom CelestialMechanics.orbi... | ```python
import unittest
from astropy import constants as astroconst
from astropy import units as u
from astropy.time import Time
from CelestialMechanics.kepler import constants
from CelestialMechanics.orbits import ellipse
from CelestialMechanics.orbits.ellipse import delta_t_t0_aeangle
class MyTestCase(unittest.TestCase):
def test_ellipse(self):
r = ellipse.r(1.5236164, 0.0932802, 32.)
self.assertAlmostEqual(1.3996391, r, places=7)
a, e = ellipse.ae(0.275, 1.168)
self.assertAlmostEqual(0.722, a, places=3)
self.assertAlmostEqual(0.618, e, places=2)
sun = astroconst.M_sun
mercury = astroconst.M_sun / constants.Mercury
energy = ellipse.E((0.38709 * u.au).to(u.m), sun, mercury)
self.assertAlmostEqual(-3.817E32, energy.value, delta=1E32)
# 4.14
a = 17.8 * u.au
e = 0.967
q, Q = ellipse.qQ(a, e)
self.assertAlmostEqual(0.031478, ellipse.v_sun(q, a, 0).value, places=5)
self.assertAlmostEqual(0.000528, ellipse.v_sun(Q, a, 0).value, places=5)
self.assertAlmostEqual(54.50, ellipse.v_sun(q, a, 0).to(u.km / u.s).value, places=2)
self.assertAlmostEqual(0.91, ellipse.v_sun(Q, a, 0).to(u.km / u.s).value, places=2)
vq, vQ = ellipse.vqQ_sun(a, e, 0)
self.assertAlmostEqual(0.031478, vq.value, places=2)
self.assertAlmostEqual(0.000528, vQ.value, places=2)
self.assertAlmostEqual(54.50, vq.to(u.km / u.s).value, places=2)
self.assertAlmostEqual(0.91, vQ.to(u.km / u.s).value, places=2)
# 4.15
a = astroconst.R_earth + 560 * u.km
self.assertAlmostEqual(7569.5, ellipse.v(a, a, astroconst.M_earth, 0).value, delta=20)
def test_chapter_5(self):
from CelestialMechanics.mu import mu_sun
# 5.5
t0 = Time('2014-01-03T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.633 * u.d
t1 = Time('2014-04-03T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.9 * u.d
t2 = Time('2014-10-05T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.5 * u.d
a = 1 * u.au
e = 0.01669
r = 1 * u.au
mu = mu_sun(1 / constants.Earth_Moon)
angles = ellipse.angles(a, e, r)
self.assertAlmostEqual(90.9563109612867, angles[0].value)
self.assertAlmostEqual(269.0436890387133, angles[1].value)
delta_t_t0 = delta_t_t0_aeangle(a, e, angles[0], mu) % (1 * u.yr).to(u.d) # module 1 year
self.assertAlmostEqual((t1 - t0).value, delta_t_t0.value, delta=0.1)
delta_t_t0 = delta_t_t0_aeangle(a, e, angles[1], mu) % (1 * u.yr).to(u.d) # module 1 year
self.assertAlmostEqual((t2 - t0).value, delta_t_t0.value, delta=0.1)
# 5.6
a = 17.834144 * u.au
e = 0.967143
angle = 60 * u.deg
mu = mu_sun(0)
delta_t_t0 = delta_t_t0_aeangle(a, e, angle, mu)
self.assertAlmostEqual(23.7573, delta_t_t0.value, places=2)
# 5.7
t0 = Time('2003-10-23T05:57:10Z', format='isot', scale='utc').jd * u.d
t1 = Time('2007-06-20T00:00:00Z', format='isot', scale='utc').jd * u.d
a = 2.56743 * u.au
e = 0.75355
r = 2.325364 * u.au
mu = mu_sun(0)
angles = ellipse.angles(a, e, r)
self.assertAlmostEqual(360 - 226.064389, angles[0].value, places=5)
self.assertAlmostEqual(226.064389, angles[1].value, places=5)
angle = angles[1] # r. < 0
# inlined ellipse.delta_t_t0_aeangle()
E = ellipse.E_angle(e, angle)
M = ellipse.angle_M_eE(e, E)
from CelestialMechanics.kepler.kepler3 import T_sun
T = T_sun(a, 0) # 1 year (of the minor planet)
delta_t_t0 = ellipse.delta_t_t0_Mn(M, ellipse.n(a, mu)) % T # module 1 year (of the minor planet)
self.assertAlmostEqual(277.187625, E.to(u.deg).value % 360, places=6)
self.assertAlmostEqual(320.023578, M.to(u.deg).value % 360, places=6)
self.assertAlmostEqual(((t1 - t0) % T).value, delta_t_t0.value, places=4)
t0_calculated = t1 - delta_t_t0
# print(Time(t0_calculated, format='jd', scale='utc').isot)
self.assertAlmostEqual(t0.value, t0_calculated.value, places=4)
if __name__ == '__main__':
unittest.main()
``` |
[
{
"content": "```python\n#! /usr/bin/env python3\n# Name: PySongGen\n#\n# Version: 0.0.1\n#\n# Author: Sinuhe Jaime Valencia\n#\n# Author_email: sierisimo@gmail.com\n#\n# Description:\n# Main code for running instances of pysonggen\n\n\nfrom pysonggen import grammar\nfrom pysonggen.songgen import SongG\n\ng... | [
{
"content": "<|memory_start|>```python\n#! /usr/bin/env python3\n# Name: PySongGen\n#\n# Version: 0.0.1\n#\n# Author: Sinuhe Jaime Valencia\n#\n# Author_email: sierisimo@gmail.com\n#\n# Description:\n# Main code for running instances of pysonggen\n\n\nfrom pysonggen import grammar\nfrom pysonggen.songgen i... | ```python
#! /usr/bin/env python3
# Name: PySongGen
#
# Version: 0.0.1
#
# Author: Sinuhe Jaime Valencia
#
# Author_email: sierisimo@gmail.com
#
# Description:
# Main code for running instances of pysonggen
from pysonggen import grammar
from pysonggen.songgen import SongG
gram = grammar.Grammar('./examples/example.mgram')
notes = None
audio = None
run = True
def get_phrase():
global notes
global audio
notes = gram.expand(input("""Give a sentence for making a song.
It's very IMPORTANT that use spaces between every letter
Example: A A A B
->"""))
audio = SongG(notes)
print("Your song is now ready, it has: " + str(len(notes)) +" notes.")
print("\n The length of the final song will be the same size, because we're using just one second per note")
def change_name():
global audio
print("Actual name is: "+audio.name+".ogg")
print("Ok. Let's give the song an awesome Name:")
name=input("New name: ")
audio.name = name
def save_song():
global audio
if audio != None:
audio.save_song()
else:
print("You have to make a song first...")
def print_notes():
global audio
if audio != None:
print("There you are, this are your notes:")
for i in audio.notes:
print(i,end=" ")
else:
print("You haven't make a song first...")
print("\n")
def exit_without_save():
print("See you later aligator")
while run:
options = {"s":save_song,
"c":change_name,
"n":get_phrase,
"w":print_notes,
"e":""
}
if audio == None:
decision = input("""
What do you want to do now?
n Make a new song
e Exit
Your choice: """)
else:
decision = input("""What do you want to do now?
s Store Song (With default name: Song.ogg)
c Change name of the song (The extension cannot be changed)
n Make a new song
w See the notes
e Exit
Your choice: """)
if len(decision) != 1 or not decision in list(options.keys()):
print("Invalid Option. Please choose a valid one")
continue
elif decision == "e":
exit_without_save()
break
options[decision]()
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# -*- coding: iso-8859-1 -*-\n# Copyright (C) 2000-2009 Bastian Kleineidam\n\"\"\"\nGroup filter rules into folders.\n\"\"\"\nfrom ... import fileutil, configuration\nfrom . import Rule\n\n\ndef recalc_up_down(rules):\n ... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# -*- coding: iso-8859-1 -*-\n# Copyright (C) 2000-2009 Bastian Kleineidam\n\"\"\"\nGroup filter rules into folders.\n\"\"\"\nfrom ... import fileutil, configuration\nfrom . import Rule\n\n\ndef recalc_up_d... | ```python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2009 Bastian Kleineidam
"""
Group filter rules into folders.
"""
from ... import fileutil, configuration
from . import Rule
def recalc_up_down(rules):
"""
Add .up and .down attributes to rules, used for display up/down
arrows in GUIs
"""
upper = len(rules)-1
for i, rule in enumerate(rules):
rule.up = (i>0)
rule.down = (i<upper)
class FolderRule(Rule.Rule):
"""
Container for a list of rules.
"""
def __init__(self, sid=None, titles=None, descriptions=None,
disable=0, filename=""):
"""
Initialize rule data.
"""
super(FolderRule, self).__init__(sid=sid, titles=titles,
descriptions=descriptions, disable=disable)
# make filename read-only
self._filename = filename
self.rules = []
self.attrnames.extend(('oid', 'configversion'))
self.intattrs.append('oid')
self.oid = None
self.configversion = "-"
def __str__(self):
"""
Return rule data as string.
"""
return super(FolderRule, self).__str__() + \
("\nrules: %d" % len(self.rules))
def filename_get(self):
"""
Get filename where this folder is stored.
"""
return self._filename
filename = property(filename_get)
def append_rule(self, r):
"""
Append rule to folder.
"""
r.oid = len(self.rules)
# note: the rules are added in order
self.rules.append(r)
r.parent = self
def delete_rule(self, i):
"""
Delete rule from folder with index i.
"""
del self.rules[i]
recalc_up_down(self.rules)
def update(self, rule, dryrun=False, log=None):
"""
Update this folder with given folder rule data.
"""
chg = super(FolderRule, self).update(rule, dryrun=dryrun, log=log)
for child in rule.rules:
if child.sid is None or not child.sid.startswith("wc"):
# ignore local rules
continue
oldrule = self.get_rule(child.sid)
if oldrule is not None:
if oldrule.update(child, dryrun=dryrun, log=log):
chg = True
else:
print >> log, _("inserting new rule %s") % \
child.tiptext()
if not dryrun:
self.rules.append(child)
chg = True
if chg:
recalc_up_down(self.rules)
return chg
def get_rule(self, sid):
"""
Return rule with given sid or None if not found.
"""
for rule in self.rules:
if rule.sid == sid:
return rule
return None
def toxml(self):
"""
Rule data as XML for storing.
"""
s = u"""<?xml version="1.0" encoding="%s"?>
<!DOCTYPE folder SYSTEM "filter.dtd">
%s oid="%d" configversion="%s">""" % \
(configuration.ConfigCharset, super(FolderRule, self).toxml(),
self.oid, self.configversion)
s += u"\n"+self.title_desc_toxml()+u"\n"
for r in self.rules:
s += u"\n%s\n" % r.toxml()
return s+u"</folder>\n"
def write(self, fd=None):
"""
Write xml data into filename.
@raise: OSError if file could not be written.
"""
s = self.toxml().encode("iso-8859-1", "replace")
if fd is None:
fileutil.write_file(self.filename, s)
else:
fd.write(s)
def tiptext(self):
"""
Return short info for gui display.
"""
l = len(self.rules)
if l == 1:
text = _("with 1 rule")
else:
text = _("with %d rules") % l
return "%s %s" % (super(FolderRule, self).tiptext(), text)
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n#@+leo-ver=5-thin\n#@+node:2014fall.20141212095015.1775: * @file wsgi.py\n# coding=utf-8\n# 上面的程式內容編碼必須在程式的第一或者第二行才會有作用\n\n################# (1) 模組導入區\n# 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝\n\n\n#@@language python\n#@@tabwidth... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n#@+leo-ver=5-thin\n#@+node:2014fall.20141212095015.1775: * @file wsgi.py\n# coding=utf-8\n# 上面的程式內容編碼必須在程式的第一或者第二行才會有作用\n\n################# (1) 模組導入區\n# 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝\n\n\n#@@language pyt... | ```python
#@+leo-ver=5-thin
#@+node:2014fall.20141212095015.1775: * @file wsgi.py
# coding=utf-8
# 上面的程式內容編碼必須在程式的第一或者第二行才會有作用
################# (1) 模組導入區
# 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi)
import cherrypy
# 導入 Python 內建的 os 模組, 因為 os 模組為 Python 內建, 所以無需透過 setup.py 安裝
import os
# 導入 random 模組
import random
# 導入 gear 模組
import gear
################# (2) 廣域變數設定區
# 確定程式檔案所在目錄, 在 Windows 下有最後的反斜線
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# 設定在雲端與近端的資料儲存目錄
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# 表示程式在雲端執行
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# 表示程式在近端執行
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
'''以下為近端 input() 與 for 迴圈應用的程式碼, 若要將程式送到 OpenShift 執行, 除了採用 CherryPy 網際框架外, 還要轉為 html 列印
# 利用 input() 取得的資料型別為字串
toprint = input("要印甚麼內容?")
# 若要將 input() 取得的字串轉為整數使用, 必須利用 int() 轉換
repeat_no = int(input("重複列印幾次?"))
for i in range(repeat_no):
print(toprint)
'''
#@-<<declarations>>
#@+others
#@+node:2014fall.20141212095015.1777: ** class Hello
################# (3) 程式類別定義區
# 以下改用 CherryPy 網際框架程式架構
# 以下為 Hello 類別的設計內容, 其中的 object 使用, 表示 Hello 類別繼承 object 的所有特性, 包括方法與屬性設計
class Hello(object):
# Hello 類別的啟動設定
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
# session 以檔案儲存, 而且位於 data_dir 下的 tmp 目錄
'tools.sessions.storage_path' : data_dir+'/tmp',
# session 有效時間設為 60 分鐘
'tools.sessions.timeout' : 60
}
#@+others
#@+node:2014fall.20141212095015.2004: *3* __init__
def __init__(self):
# 配合透過案例啟始建立所需的目錄
if not os.path.isdir(data_dir+'/tmp'):
os.mkdir(data_dir+'/tmp')
if not os.path.isdir(data_dir+"/downloads"):
os.mkdir(data_dir+"/downloads")
if not os.path.isdir(data_dir+"/images"):
os.mkdir(data_dir+"/images")
#@+node:2014fall.20141212095015.1778: *3* index_orig
# 以 @ 開頭的 cherrypy.expose 為 decorator, 用來表示隨後的成員方法, 可以直接讓使用者以 URL 連結執行
@cherrypy.expose
# index 方法為 CherryPy 各類別成員方法中的內建(default)方法, 當使用者執行時未指定方法, 系統將會優先執行 index 方法
# 有 self 的方法為類別中的成員方法, Python 程式透過此一 self 在各成員方法間傳遞物件內容
def index_orig(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141212095015.1779: *3* hello
@cherrypy.expose
def hello(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141215194146.1791: *3* index
@cherrypy.expose
def two(self, guess=None):
# 將標準答案存入 answer session 對應區
theanswer = random.randint(1, 100)
thecount = 0
# 將答案與計算次數變數存進 session 對應變數
cherrypy.session['answer'] = theanswer
cherrypy.session['count'] = thecount
# 印出讓使用者輸入的超文件表單
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>
<hr>
<!-- 以下在網頁內嵌 Brython 程式 -->
<script type="text/python">
from browser import document, alert
def echo(ev):
alert(document["zone"].value)
# 將文件中名稱為 mybutton 的物件, 透過 click 事件與 echo 函式 bind 在一起
document['mybutton'].bind('click',echo)
</script>
<input id="zone"><button id="mybutton">click !</button>
<hr>
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
# 以下使用中文變數名稱
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1713: *3* twoDgear
@cherrypy.expose
# A齒數1,B齒數2,C齒數3,D齒數4,E齒數5,F齒數6, M 為模數, P 為壓力角
def index(self,A=None,B=None,C=None,D=None, E=None, F=None, M=None, P=None):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
40123103<br>周于婷
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=mygeartest2>
<p>齒數1:
<select name=A>
<option>10
<option>11
<option>12
<option>13
<option>14
<option>15
<option>16
<option>17
<option>18
<option>19
<option>20
<option>21
<option>22
<option>23
<option>24
<option>25
<option>26
<option>27
<option>28
<option>29
<option>30
</select>
<p>齒數2:
<select name=B>
<option>10
<option>11
<option>12
<option>13
<option>14
<option>15
<option>16
<option>17
<option>18
<option>19
<option>20
<option>21
<option>22
<option>23
<option>24
<option>25
<option>26
<option>27
<option>28
<option>29
<option>30
</select>
<p>齒數3:
<select name=C>
<option>10
<option>11
<option>12
<option>13
<option>14
<option>15
<option>16
<option>17
<option>18
<option>19
<option>20
<option>21
<option>22
<option>23
<option>24
<option>25
<option>26
<option>27
<option>28
<option>29
<option>30
</select>
<p>齒數4:
<select name=D>
<option>10
<option>11
<option>12
<option>13
<option>14
<option>15
<option>16
<option>17
<option>18
<option>19
<option>20
<option>21
<option>22
<option>23
<option>24
<option>25
<option>26
<option>27
<option>28
<option>29
<option>30
</select>
<p>齒數5:
<select name=E>
<option>10
<option>11
<option>12
<option>13
<option>14
<option>15
<option>16
<option>17
<option>18
<option>19
<option>20
<option>21
<option>22
<option>23
<option>24
<option>25
<option>26
<option>27
<option>28
<option>29
<option>30
</select>
</select>
<p>齒數6:
<select name=F>
<option>10
<option>11
<option>12
<option>13
<option>14
<option>15
<option>16
<option>17
<option>18
<option>19
<option>20
<option>21
<option>22
<option>23
<option>24
<option>25
<option>26
<option>27
<option>28
<option>29
<option>30
</select>
<p>模數:
<select name=M>
<option>2
<option>3
<option>4
<option>5
<option>6
<option>7
<option>8
<option>9
<option>10
<option>11
<option>12
<option>13
<option>14
<option>15
<option>16
<option>17
<option>18
<option>19
<option>20
<option>21
<option>22
<option>23
<option>24
<option>25
</select>
<p>壓力角:
<select name=P>
<option>14.5
<option>15.0
<option>15.5
<option>16.0
<option>16.5
<option>17.0
<option>17.5
<option>18.0
<option>18.5
<option>19.0
<option>19.5
<option>20.0
<option>20.5
<option>21.0
<option>21.5
<option>22.0
<option>22.5
</select>
</br>
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1733: *3* threeDgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def threeDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=do3Dgear>
齒數:<input type=text name=N><br />
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1762: *3* do2Dgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def do2Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1735: *3* do3Dgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def do3Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1765: *3* mygeartest
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
    def mygeartest(self, N=20, M=5, P=15):
        """Serve a Brython page that draws one involute spur gear on a canvas.

        :param N: tooth count (accepted for URL compatibility).
        :param M: module (accepted for URL compatibility).
        :param P: pressure angle (accepted for URL compatibility).
        :returns: the full HTML page as a string.

        NOTE(review): the embedded client-side script draws a fixed gear
        (齒輪(400,400,300,41,"blue")) and ignores N, M and P — confirm
        whether the parameters were meant to be substituted in.  The
        markup below is served verbatim to the browser and must not be
        reformatted or translated.
        """
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
    ctx.beginPath()
    ctx.lineWidth = width
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = fill
    ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def 齒輪(midx, midy, rp, n, 顏色):
    # 將角度轉換因子設為全域變數
    global deg
    # 齒輪漸開線分成 15 線段繪製
    imax = 15
    # 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
    create_line(midx, midy, midx, midy-rp)
    # 畫出 rp 圓, 畫圓函式尚未定義
    #create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
    # a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
    # 模數也就是齒冠大小
    a=2*rp/n
    # d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
    d=2.5*rp/n
    # ra 為齒輪的外圍半徑
    ra=rp+a
    print("ra:", ra)
    # 畫出 ra 圓, 畫圓函式尚未定義
    #create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
    # rb 則為齒輪的基圓半徑
    # 基圓為漸開線長齒之基準圓
    rb=rp*cos(20*deg)
    print("rp:", rp)
    print("rb:", rb)
    # 畫出 rb 圓 (基圓), 畫圓函式尚未定義
    #create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
    # rd 為齒根圓半徑
    rd=rp-d
    # 當 rd 大於 rb 時
    print("rd:", rd)
    # 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
    #create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
    # dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
    # 將圓弧分成 imax 段來繪製漸開線
    dr=(ra-rb)/imax
    # tan(20*deg)-20*deg 為漸開線函數
    sigma=pi/(2*n)+tan(20*deg)-20*deg
    for j in range(n):
        ang=-2.*j*pi/n+sigma
        ang2=2.*j*pi/n+sigma
        lxd=midx+rd*sin(ang2-2.*pi/n)
        lyd=midy-rd*cos(ang2-2.*pi/n)
        #for(i=0;i<=imax;i++):
        for i in range(imax+1):
            r=rb+i*dr
            theta=sqrt((r*r)/(rb*rb)-1.)
            alpha=theta-atan(theta)
            xpt=r*sin(alpha-ang)
            ypt=r*cos(alpha-ang)
            xd=rd*sin(-ang)
            yd=rd*cos(-ang)
            # i=0 時, 繪線起點由齒根圓上的點, 作為起點
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            # 最後一點, 則為齒頂圓
            if(i==imax):
                lfx=midx+xpt
                lfy=midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # the line from last end of dedendum point to the recent
        # end of dedendum point
        # lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
        # 下列為齒根圓上用來近似圓弧的直線
        create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
        #for(i=0;i<=imax;i++):
        for i in range(imax+1):
            r=rb+i*dr
            theta=sqrt((r*r)/(rb*rb)-1.)
            alpha=theta-atan(theta)
            xpt=r*sin(ang2-alpha)
            ypt=r*cos(ang2-alpha)
            xd=rd*sin(ang2)
            yd=rd*cos(ang2)
            # i=0 時, 繪線起點由齒根圓上的點, 作為起點
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            # 最後一點, 則為齒頂圓
            if(i==imax):
                rfx=midx+xpt
                rfy=midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
        # 下列為齒頂圓上用來近似圓弧的直線
        create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
        return outstring
#@+node:amd.20150415215023.1: *3* mygeartest2
@cherrypy.expose
# A齒數1,B齒數2,C齒數3,D齒數4,E齒數5,F齒數6, M 為模數, P 為壓力角
    def mygeartest2(self,A=15,B=17,C=15,D=17, E=15, F=17, M=5, P=15):
        """Serve a Brython page that draws a train of six meshing spur gears.

        :param A: tooth count of gear 1.
        :param B: tooth count of gear 2.
        :param C: tooth count of gear 3.
        :param D: tooth count of gear 4.
        :param E: tooth count of gear 5.
        :param F: tooth count of gear 6.
        :param M: module (all gears share it, required for meshing).
        :param P: pressure angle in degrees (all gears share it).
        :returns: the full HTML page as a string.

        The parameter values are spliced (via str()) into the embedded
        client-side script, which positions the gears in a horizontal
        row and rotates each one so its teeth mesh with its neighbour.
        The drawing itself is delegated to the client-side spur.py
        module.  The markup is served verbatim and must not be altered.
        """
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<a href='/index'>返回</a>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 請注意, 這裡導入位於 Lib/site-packages 目錄下的 spur.py 檔案
import spur
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 以下利用 spur.py 程式進行繪圖, 接下來的協同設計運算必須要配合使用者的需求進行設計運算與繪圖
# 其中並將工作分配給其他組員建立類似 spur.py 的相關零件繪圖模組
# midx, midy 為齒輪圓心座標, rp 為節圓半徑, n 為齒數, pa 為壓力角, color 為線的顏色
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# 模數決定齒的尺寸大小, 囓合齒輪組必須有相同的模數與壓力角
# 壓力角 pa 單位為角度
pa ='''+str(P)+'''
# m 為模數
m = '''+str(M)+'''
# 第1齒輪齒數
n_g1 = '''+str(A)+'''
# 第2齒輪齒數
n_g2 = '''+str(B)+'''
# 第3齒輪齒數
n_g3 = '''+str(C)+'''
# 第4齒輪齒數
n_g4 = '''+str(D)+'''
# 第5齒輪齒數
n_g5 = '''+str(E)+'''
# 第6齒輪齒數
n_g6 = '''+str(F)+'''
# 計算兩齒輪的節圓半徑
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
rp_g3 = m*n_g3/2
rp_g4 = m*n_g4/2
rp_g5 = m*n_g5/2
rp_g6 = m*n_g6/2
# 繪圖第1齒輪的圓心座標
x_g1 = 280
y_g1 = 400
# 第2齒輪的圓心座標, 假設排列成水平, 表示各齒輪圓心 y 座標相同
x_g2 = x_g1 + rp_g1 + rp_g2
y_g2 = y_g1
# 第3齒輪的圓心座標
x_g3 = x_g1 + rp_g1 + 2*rp_g2 + rp_g3
y_g3 = y_g1
# 第4齒輪的圓心座標
x_g4 = x_g1 + rp_g1 + 2*rp_g2 +2* rp_g3+ rp_g4
y_g4 = y_g1
# 第5齒輪的圓心座標
x_g5 = x_g1 + rp_g1 + 2*rp_g2 +2* rp_g3+2* rp_g4 + rp_g5
y_g5 = y_g1
# 第6齒輪的圓心座標
x_g6 = x_g1 + rp_g1 + 2*rp_g2 +2* rp_g3+2* rp_g4+2*rp_g5+rp_g6
y_g6 = y_g1
# 將第1齒輪順時鐘轉 90 度
# 使用 ctx.save() 與 ctx.restore() 以確保各齒輪以相對座標進行旋轉繪圖
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g1, y_g1)
# rotate to engage
ctx.rotate(pi/2)
# put it back
ctx.translate(-x_g1, -y_g1)
spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "red")
ctx.restore()
# 將第2齒輪逆時鐘轉 90 度之後, 再多轉一齒, 以便與第1齒輪進行囓合
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g2, y_g2)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g2)
# put it back
ctx.translate(-x_g2, -y_g2)
spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "Orange")
ctx.restore()
# 將第3齒輪逆時鐘轉 90 度之後, 再往回轉第2齒輪定位帶動轉角, 然後再逆時鐘多轉一齒, 以便與第2齒輪進行囓合
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g3, y_g3)
# rotate to engage
# pi+pi/n_g2 為第2齒輪從順時鐘轉 90 度之後, 必須配合目前的標記線所作的齒輪 2 轉動角度, 要轉換到齒輪3 的轉動角度
# 必須乘上兩齒輪齒數的比例, 若齒輪2 大, 則齒輪3 會轉動較快
# 第1個 -pi/2 為將原先垂直的第3齒輪定位線逆時鐘旋轉 90 度
# -pi/n_g3 則是第3齒與第2齒定位線重合後, 必須再逆時鐘多轉一齒的轉角, 以便進行囓合
# (pi+pi/n_g2)*n_g2/n_g3 則是第2齒原定位線為順時鐘轉動 90 度,
# 但是第2齒輪為了與第1齒輪囓合, 已經距離定位線, 多轉了 180 度, 再加上第2齒輪的一齒角度, 因為要帶動第3齒輪定位,
# 這個修正角度必須要再配合第2齒與第3齒的轉速比加以轉換成第3齒輪的轉角, 因此乘上 n_g2/n_g3
ctx.rotate(-pi/2-pi/n_g3+(pi+pi/n_g2)*n_g2/n_g3)
# put it back
ctx.translate(-x_g3, -y_g3)
spur.Spur(ctx).Gear(x_g3, y_g3, rp_g3, n_g3, pa, "yellow")
ctx.restore()
#第4齒輪
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g4, y_g4)
# rotate to engage
ctx.rotate(-pi/2+(pi+pi/n_g3)*n_g3/n_g4)
# put it back
ctx.translate(-x_g4, -y_g4)
spur.Spur(ctx).Gear(x_g4, y_g4, rp_g4, n_g4, pa, "green")
ctx.restore()
#第5齒輪
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g5, y_g5)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g5+(pi+pi/n_g4)*n_g4/n_g5)
# put it back
ctx.translate(-x_g5, -y_g5)
spur.Spur(ctx).Gear(x_g5, y_g5, rp_g5, n_g5, pa, "blue")
ctx.restore()
#第6齒輪(囓合有問題)
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g6, y_g6)
# rotate to engage
ctx.rotate(-pi/2+pi/n_g6+(pi+pi/n_g5)*n_g5/n_g6)
# put it back
ctx.translate(-x_g6, -y_g6)
spur.Spur(ctx).Gear(x_g6, y_g6, rp_g6, n_g6, pa, "purple")
ctx.restore()
</script>
<canvas id="plotarea" width="4000" height="1200"></canvas>
</body>
</html>
'''
        return outstring
#@+node:2015.20150331094055.1737: *3* my3Dgeartest
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
    def my3Dgeartest(self, N=20, M=5, P=15):
        """Serve a Brython page that draws one involute spur gear on a canvas.

        :param N: tooth count (accepted for URL compatibility).
        :param M: module (accepted for URL compatibility).
        :param P: pressure angle (accepted for URL compatibility).
        :returns: the full HTML page as a string.

        NOTE(review): despite the "3D" name this is the same 2D drawing
        as mygeartest (the gear helper here is named ``gear`` instead of
        ``齒輪``); the embedded script draws a fixed gear
        (gear(400,400,300,41,"blue")) and ignores N, M and P.  The
        markup below is served verbatim and must not be altered.
        """
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
    ctx.beginPath()
    ctx.lineWidth = width
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = fill
    ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def gear(midx, midy, rp, n, 顏色):
    # 將角度轉換因子設為全域變數
    global deg
    # 齒輪漸開線分成 15 線段繪製
    imax = 15
    # 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
    create_line(midx, midy, midx, midy-rp)
    # 畫出 rp 圓, 畫圓函式尚未定義
    #create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
    # a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
    # 模數也就是齒冠大小
    a=2*rp/n
    # d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
    d=2.5*rp/n
    # ra 為齒輪的外圍半徑
    ra=rp+a
    print("ra:", ra)
    # 畫出 ra 圓, 畫圓函式尚未定義
    #create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
    # rb 則為齒輪的基圓半徑
    # 基圓為漸開線長齒之基準圓
    rb=rp*cos(20*deg)
    print("rp:", rp)
    print("rb:", rb)
    # 畫出 rb 圓 (基圓), 畫圓函式尚未定義
    #create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
    # rd 為齒根圓半徑
    rd=rp-d
    # 當 rd 大於 rb 時
    print("rd:", rd)
    # 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
    #create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
    # dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
    # 將圓弧分成 imax 段來繪製漸開線
    dr=(ra-rb)/imax
    # tan(20*deg)-20*deg 為漸開線函數
    sigma=pi/(2*n)+tan(20*deg)-20*deg
    for j in range(n):
        ang=-2.*j*pi/n+sigma
        ang2=2.*j*pi/n+sigma
        lxd=midx+rd*sin(ang2-2.*pi/n)
        lyd=midy-rd*cos(ang2-2.*pi/n)
        #for(i=0;i<=imax;i++):
        for i in range(imax+1):
            r=rb+i*dr
            theta=sqrt((r*r)/(rb*rb)-1.)
            alpha=theta-atan(theta)
            xpt=r*sin(alpha-ang)
            ypt=r*cos(alpha-ang)
            xd=rd*sin(-ang)
            yd=rd*cos(-ang)
            # i=0 時, 繪線起點由齒根圓上的點, 作為起點
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            # 最後一點, 則為齒頂圓
            if(i==imax):
                lfx=midx+xpt
                lfy=midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # the line from last end of dedendum point to the recent
        # end of dedendum point
        # lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
        # 下列為齒根圓上用來近似圓弧的直線
        create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
        #for(i=0;i<=imax;i++):
        for i in range(imax+1):
            r=rb+i*dr
            theta=sqrt((r*r)/(rb*rb)-1.)
            alpha=theta-atan(theta)
            xpt=r*sin(ang2-alpha)
            ypt=r*cos(ang2-alpha)
            xd=rd*sin(ang2)
            yd=rd*cos(ang2)
            # i=0 時, 繪線起點由齒根圓上的點, 作為起點
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            # 最後一點, 則為齒頂圓
            if(i==imax):
                rfx=midx+xpt
                rfy=midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
        # 下列為齒頂圓上用來近似圓弧的直線
        create_line(lfx,lfy,rfx,rfy,fill=顏色)
gear(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
        return outstring
#@+node:2014fall.20141215194146.1793: *3* doCheck
@cherrypy.expose
def doCheck(self, guess=None):
# 假如使用者直接執行 doCheck, 則設法轉回根方法
if guess is None:
raise cherrypy.HTTPRedirect("/")
# 從 session 取出 answer 對應資料, 且處理直接執行 doCheck 時無法取 session 值情況
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
# 經由表單所取得的 guess 資料型別為 string
try:
theguess = int(guess)
except:
return "error " + self.guessform()
# 每執行 doCheck 一次,次數增量一次
cherrypy.session['count'] += 1
# 答案與所猜數字進行比對
if theanswer < theguess:
return "big " + self.guessform()
elif theanswer > theguess:
return "small " + self.guessform()
else:
# 已經猜對, 從 session 取出累計猜測次數
thecount = cherrypy.session.get('count')
return "exact: <a href=''>再猜</a>"
#@+node:2014fall.20141215194146.1789: *3* guessform
def guessform(self):
# 印出讓使用者輸入的超文件表單
outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>'''
return outstring
#@-others
#@-others
################# (4) Program startup
# Static directories/files are resolved relative to the program directory.
application_conf = {'/static':{
        'tools.staticdir.on': True,
        # a "static" directory must exist under the execution directory
        'tools.staticdir.dir': _curdir+"/static"},
    '/downloads':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/downloads"},
    '/images':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/images"}
    }

root = Hello()
root.gear = gear.Gear()

# os.environ supports membership tests directly; .keys() was redundant.
if 'OPENSHIFT_REPO_DIR' in os.environ:
    # running on OpenShift: hand the WSGI application to the container
    application = cherrypy.Application(root, config=application_conf)
else:
    # running locally: start the built-in CherryPy server
    cherrypy.quickstart(root, config=application_conf)
#@-leo
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'CustomEmail.system'\n ... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'CustomEmai... | ```python
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Add the system, school, owner and state columns to the
        admin_custom_emails table (CustomEmail model)."""
        # Adding field 'CustomEmail.system'
        db.add_column('admin_custom_emails', 'system',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'CustomEmail.school'
        db.add_column('admin_custom_emails', 'school',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['student.School'], on_delete=models.PROTECT),
                      keep_default=False)
        # Adding field 'CustomEmail.owner'
        db.add_column('admin_custom_emails', 'owner',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['student.UserProfile'], null=True, on_delete=models.PROTECT, blank=True),
                      keep_default=False)
        # Adding field 'CustomEmail.state'
        db.add_column('admin_custom_emails', 'state',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['student.State'], on_delete=models.PROTECT),
                      keep_default=False)
    def backwards(self, orm):
        """Drop the columns added by forwards(); foreign-key columns are
        stored with Django's _id suffix."""
        # Deleting field 'CustomEmail.system'
        db.delete_column('admin_custom_emails', 'system')
        # Deleting field 'CustomEmail.school'
        db.delete_column('admin_custom_emails', 'school_id')
        # Deleting field 'CustomEmail.owner'
        db.delete_column('admin_custom_emails', 'owner_id')
        # Deleting field 'CustomEmail.state'
        db.delete_column('admin_custom_emails', 'state_id')
models = {
'administration.adjustmenttimelog': {
'Meta': {'object_name': 'AdjustmentTimeLog', 'db_table': "'adjustment_time_log'"},
'adjustment_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'admin_email': ('django.db.models.fields.CharField', [], {'max_length': '75', 'db_index': 'True'}),
'comments': ('django.db.models.fields.CharField', [], {'max_length': '756', 'null': 'True', 'db_index': 'True'}),
'course_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'db_index': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'user_email': ('django.db.models.fields.CharField', [], {'max_length': '75', 'db_index': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'max_length': '11'})
},
'administration.author': {
'Meta': {'object_name': 'Author', 'db_table': "'author'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'administration.certificate': {
'Meta': {'object_name': 'Certificate', 'db_table': "'certificate'"},
'association': ('django.db.models.fields.IntegerField', [], {}),
'association_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['administration.CertificateAssociationType']"}),
'certificate_blob': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'certificate_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'administration.certificateassociationtype': {
'Meta': {'object_name': 'CertificateAssociationType', 'db_table': "'certificate_association_type'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'administration.customemail': {
'Meta': {'object_name': 'CustomEmail', 'db_table': "'admin_custom_emails'"},
'district': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.District']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'email_content': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.UserProfile']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.School']", 'on_delete': 'models.PROTECT'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.State']", 'on_delete': 'models.PROTECT'}),
'system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'on_delete': 'models.PROTECT'})
},
'administration.emailtask': {
'Meta': {'object_name': 'EmailTask', 'db_table': "'admin_email_task'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'process_emails': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'success_emails': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'total_emails': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['auth.User']"})
},
'administration.emailtasklog': {
'Meta': {'object_name': 'EmailTaskLog', 'db_table': "'admin_email_task_log'"},
'district_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '75', 'db_index': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'send_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['administration.EmailTask']", 'on_delete': 'models.PROTECT'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'})
},
'administration.filterfavorite': {
'Meta': {'object_name': 'FilterFavorite', 'db_table': "'admin_filter_favorite'"},
'filter_json': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'administration.hangoutpermissions': {
'Meta': {'object_name': 'HangoutPermissions', 'db_table': "'hangout_permissions'"},
'district': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.District']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'administration.importtask': {
'Meta': {'object_name': 'ImportTask', 'db_table': "'admin_import_task'"},
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'process_lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'success_lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'total_lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['auth.User']"})
},
'administration.importtasklog': {
'Meta': {'object_name': 'ImportTaskLog', 'db_table': "'admin_import_task_log'"},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_data': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'line': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['administration.ImportTask']", 'on_delete': 'models.PROTECT'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'})
},
'administration.pepreginstructor': {
'Meta': {'object_name': 'PepRegInstructor', 'db_table': "'pepreg_instructor'"},
'all_delete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'all_edit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_create': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['administration.PepRegTraining']"}),
'user_create': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"})
},
'administration.pepregstudent': {
'Meta': {'object_name': 'PepRegStudent', 'db_table': "'pepreg_student'"},
'date_create': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modify': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'student_credit': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'student_status': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['administration.PepRegTraining']"}),
'user_create': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'user_modify': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"})
},
'administration.pepregtraining': {
'Meta': {'object_name': 'PepRegTraining', 'db_table': "'pepreg_training'"},
'allow_attendance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_registration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_student_attendance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_validation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'attendancel_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'classroom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'credits': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'date_create': ('django.db.models.fields.DateField', [], {}),
'date_modify': ('django.db.models.fields.DateField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'district': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.District']"}),
'geo_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_props': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'max_registration': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pepper_course': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'school_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'training_date': ('django.db.models.fields.DateField', [], {}),
'training_time_end': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'training_time_start': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_create': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'user_modify': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"})
},
'administration.timereportperm': {
'Meta': {'object_name': 'TimeReportPerm', 'db_table': "'time_report_perm'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['auth.User']"})
},
'administration.timereporttask': {
'Meta': {'object_name': 'TimeReportTask', 'db_table': "'admin_time_report_task'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'process_num': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'success_num': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'total_num': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['auth.User']"})
},
'administration.userlogininfo': {
'Meta': {'object_name': 'UserLoginInfo', 'db_table': "'user_login_info'"},
'create_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_session': ('django.db.models.fields.IntegerField', [], {'max_length': '15'}),
'login_time': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'login_times': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '15'}),
'logout_press': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logout_time': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'temp_time': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'total_session': ('django.db.models.fields.IntegerField', [], {'max_length': '30'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'max_length': '11'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.cohort': {
'Meta': {'object_name': 'Cohort', 'db_table': "'cohort'"},
'code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'district': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.District']", 'on_delete': 'models.PROTECT'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'licences': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'term_months': ('django.db.models.fields.IntegerField', [], {})
},
'student.district': {
'Meta': {'object_name': 'District', 'db_table': "'district'"},
'code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.State']", 'on_delete': 'models.PROTECT'})
},
'student.school': {
'Meta': {'object_name': 'School', 'db_table': "'school'"},
'code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'district': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.District']", 'on_delete': 'models.PROTECT'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'student.state': {
'Meta': {'object_name': 'State', 'db_table': "'state'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'so': ('django.db.models.fields.IntegerField', [], {})
},
'student.subjectarea': {
'Meta': {'object_name': 'SubjectArea', 'db_table': "'subject_area'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'so': ('django.db.models.fields.IntegerField', [], {})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'activate_date': ('django.db.models.fields.DateTimeField', [], {}),
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bio': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'cohort': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['student.Cohort']", 'on_delete': 'models.PROTECT'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'district': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.District']", 'on_delete': 'models.PROTECT'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'grade_level_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_date': ('django.db.models.fields.DateTimeField', [], {}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'last_activity': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'major_subject_area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.SubjectArea']", 'on_delete': 'models.PROTECT'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'people_of': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2048', 'blank': 'True'}),
'percent_eng_learner': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'percent_iep': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'percent_lunch': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.School']", 'on_delete': 'models.PROTECT'}),
'skype_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sso_idp': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sso_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'sso_user_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subscription_status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'years_in_education': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.YearsInEducation']", 'on_delete': 'models.PROTECT'})
},
'student.yearsineducation': {
'Meta': {'object_name': 'YearsInEducation', 'db_table': "'years_in_education'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'so': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['administration']
``` |
[
{
"content": "```python\n\"\"\"\nThis file is part of PUQ\nCopyright (c) 2013 PUQ Authors\nSee LICENSE file for terms.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport time, os, re, h5py, sys, string\nimport numpy as np\nfrom puq.testprogram import TestProgram\nfrom numpy impo... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nThis file is part of PUQ\nCopyright (c) 2013 PUQ Authors\nSee LICENSE file for terms.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport time, os, re, h5py, sys, string\nimport numpy as np\nfrom puq.testprogram import TestProgram\... | ```python
"""
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
from __future__ import absolute_import, division, print_function
import time, os, re, h5py, sys, string
import numpy as np
from puq.testprogram import TestProgram
from numpy import ndarray
from puq.hdf import get_output_names
from logging import debug
from puq.util import vprint
from puq.options import options
from puq.jpickle import pickle, unpickle
from socket import gethostname
from puq.parameter import get_psamples
import getpass
from puq.calibrate import calibrate
# for python3
if sys.version[0] == "3":
raw_input = input
py3 = True
else:
py3 = False
_vcache = {}
_dcache = {}
class Sweep(object):
    """
    Creates an object that contains all the information about
    a parameter sweep.

    Args:
      psweep: Parameter Sweep object. See :class:`PSweep`.
      host: Host object. See :class:`Host`.
      prog: TestProgram object: See :class:`TestProgram`.
      caldata(array): Experimental data for calibration. Optional.
      calerr(float): Measurement error in the experimental data.
      description(string): Optional description of this run.
    """
    def __init__(self, psweep, host, prog, caldata=None, calerr=None, description=''):
        self.host = host
        self._reinit = False
        if isinstance(prog, TestProgram):
            self.prog = prog
        else:
            self.prog = TestProgram(prog)
        if description == '':
            description = self.prog.desc
        self.description = description

        # optional calibration data
        self.caldata = caldata
        self.err = calerr

        # trying to get 10Hz resolution, 1 year clock
        secperyear = 365*24*60*60
        self.fname = 'sweep_%s' % int((time.time() % secperyear) * 10)

        self.psweep = psweep
        self.host.prog = self.prog
        self.input_script = os.path.abspath(sys.argv[0])

    def _save_hdf5(self):
        """Write (or refresh) sweep state and inputs to <fname>.hdf5."""
        debug('')
        h5 = h5py.File(self.fname + '.hdf5')

        # write HDF5 header information, once only
        if 'version' not in h5.attrs:
            h5.attrs['MEMOSA_UQ'] = b'MEMOSA'
            h5.attrs['version'] = 201
            # h5.attrs['id'] = self.id
            h5.attrs['date'] = time.strftime("%b %d %H:%M %Z %Y", time.localtime())
            h5.attrs['hostname'] = gethostname()
            h5.attrs['username'] = getpass.getuser()
            h5.attrs['UQtype'] = self.psweep.__class__.__name__.lower()
            h5.attrs['description'] = self.description

        # overwrite previous state: it is re-pickled in full below
        if 'input' in h5:
            del h5['input']
        if 'private' in h5:
            del h5['private']

        hp = h5.require_group('private')
        hp['sweep'] = pickle(self)

        # in /input write the input params in json and regular arrays
        h = h5.require_group('input')

        # basic parameter table for non-python reading of the hdf5 file
        h['param_array'] = np.column_stack([p.values for p in self.psweep.params])

        if py3:
            h['param_array'].attrs['name'] = [bytes(p.name, 'UTF-8') for p in self.psweep.params]
            h['param_array'].attrs['description'] = [bytes(p.description, 'UTF-8') for p in self.psweep.params]
        else:
            h['param_array'].attrs['name'] = [str(p.name) for p in self.psweep.params]
            h['param_array'].attrs['description'] = [str(p.description) for p in self.psweep.params]

        # json-pickled parameters
        h = h.require_group('params')
        for p in self.psweep.params:
            h[p.name] = pickle(p)
            h[p.name].attrs['description'] = p.description
            h[p.name].attrs['label'] = p.label

        if hasattr(self.psweep, 'kde'):
            h5['input/kde'] = pickle(self.psweep.kde)

        # input script (best effort; source may be gone on a later resume)
        if hasattr(self, 'input_script'):
            h5['input/scriptname'] = str(self.input_script)
            try:
                # context manager so the file handle is not leaked
                with open(self.input_script) as f:
                    h5['input/script'] = f.read()
            except Exception:
                h5['input/script'] = "Source was unavailable."
        h5.close()

    def _save_and_run(self):
        """Checkpoint, run the jobs on the host, then checkpoint again."""
        self._save_hdf5()
        res = self.host.run()
        if res:
            self._save_hdf5()
        return res

    def run(self, fn=None, overwrite=False):
        """
        Calls PSweep.run() to run all the jobs in the Sweep. Collect the data
        from the outputs and call the PSweep analyze method. If the PSweep method
        has an iterative callback defined, call it, otherwise return.

        Args:
          fn(string): HDF5 filename for output. '.hdf5' will be
            appended to the filename if necessary. If fn is None,
            a filename will be generated starting with "sweep\_"
            followed by a timestamp.
          overwrite(boolean): If True and fn is not None, will
            silently overwrite any previous files of the same name.
        Returns:
          True on success.
        """
        if fn is not None:
            self.fname = os.path.splitext(fn)[0]
            fn = self.fname + '.hdf5'
            if os.path.exists(fn):
                if not overwrite:
                    done = False
                    while 1:
                        ans = raw_input('%s already exists. Replace (Y/N):' % fn)
                        try:
                            if ans.upper() == 'N':
                                done = True
                                break
                            elif ans.upper() == 'Y':
                                break
                        except Exception:
                            pass
                        print("Please answer with 'Y' or 'N'\n")
                    if done:
                        sys.exit(-1)
                os.remove(fn)
        vprint(1, 'Saving run to %s.hdf5' % self.fname)
        return self.psweep.run(self)

    def extend(self, num=None):
        """Extend the sweep with *num* additional runs (delegated to psweep)."""
        return self.psweep.extend(num)

    def collect_data(self, hf=None):
        """ Collects data from captured stdout files and puts it in arrays
        in 'output/data'. Returns True on success.
        """
        need_to_close = False
        if hf is None:
            hf = h5py.File(self.fname + '.hdf5')
            need_to_close = True

        finished_jobs = self.host.collect(hf)
        self._extract_hdf5(hf, finished_jobs)

        # stay defined even when no output data exists (previously these were
        # unbound and the final return raised NameError when _reinit was set)
        params = data = None
        has_data = 'data' in hf['output']
        if has_data:
            outd = hf['output/data']
            data = dict([(x, outd[x].value) for x in outd])
            params = dict([(p.name, p.values) for p in self.psweep.params])

        if need_to_close:
            hf.close()

        if not has_data and not self._reinit:
            print("WARNING: There is no data in the output section!")
            print("Check that your runs completed successfully.")
            return False

        return params, data

    def analyze_errors(self, hf):
        """Scan job stderr/stdout and report jobs that failed or produced no data."""
        # raw string: '\d' is a regex digit class, not a python escape
        p = re.compile(r'Command exited with non-zero status \d+')
        for job in hf['output/jobs']:
            if job == 'time':
                continue
            err = hf['output/jobs/%s/stderr' % job].value
            res = p.findall(err)
            if res:
                print("Job %s: %s" % (job, res[0]))
                for line in err.split('\n'):
                    if line != res[0] and not line.startswith('HDF5:{'):
                        print(line)
            elif len(err) == 0:
                print("Job %s never completed. Walltime exceeded?" % job)

            results = False
            out = hf['output/jobs/%s/stdout' % job].value
            for line in out.split('\n'):
                if line.startswith('HDF5:{'):
                    results = True
                    break
            if not results:
                print("ERROR: Job %s has no output data in stdout." % job)

    def analyze(self, verbose=False):
        """
        Collects the output from all the jobs into an HDF5 file.
        Parses any tagged data in the output and puts it in
        the /data group in the HDF5 file.
        """
        debug('')
        hf = h5py.File(self.fname + '.hdf5')
        if not self.host.status(quiet=1)[1]:
            print("Cannot collect data or perform analysis until all jobs are completed.")
            print("You should do 'puq resume' to resume jobs.")
            sys.exit(-1)

        # collect the data if it has not already been collected.
        has_data = 'output' in hf and 'data' in hf['output']
        if not has_data:
            self.collect_data(hf)

        # initialize so 'errors' is bound even when analysis succeeds but
        # no data group exists (previously a NameError on return)
        errors = 0
        try:
            self.psweep.analyze(hf)
        except Exception:
            print('Warning: analysis failed.')
            errors = 1

        # quick error check
        if 'data' in hf['output']:
            errors = 0
            try:
                options[self.psweep.__class__.__name__]['verbose'] = verbose
            except KeyError:
                options[self.psweep.__class__.__name__] = {'verbose': verbose}

            for var in hf['output/data']:
                if not isinstance(hf['output/data/%s' % var], h5py.Group):
                    tlen = len(hf['output/data/%s' % var].value)
                    num_jobs = len(hf['output/jobs'])
                    if 'time' in hf['output/jobs']:
                        num_jobs -= 1
                    if tlen != num_jobs:
                        errors += 1
                        print("Expected %s data points for variable %s, but got %s." % (num_jobs, var, tlen))
                        self.analyze_errors(hf)
                        return errors

        if 'psamples' not in hf:
            s = get_psamples(self.psweep.params)
            if s is not None:
                hf['psamples'] = s

        # FIXME check for correlation if multiple outputs

        # calibrate
        if hasattr(self, 'caldata') and self.caldata is not None:
            self._calibrate(hf)
        hf.close()

        self._save_hdf5()
        return errors

    # Bayesian Calibration
    def _calibrate(self, hf):
        """Calibrate the parameters against the experimental data."""
        ovar = get_output_names(hf)[0]
        method = hf.attrs['UQtype']

        rs = unpickle(hf["/%s/%s/response" % (method, ovar)].value)

        # print "Calling calibrate from sweep"
        self.psweep.params, self.psweep.kde = calibrate(self.psweep.params, self.caldata, self.err, rs.eval)

    def _dump_hdf5_cache(self, hf, d):
        """Flush the module caches to /output/data (d=True) or /output/jobs."""
        global _vcache, _dcache
        if len(_vcache):
            if d:
                dgrp = hf.require_group('output/data')
            else:
                dgrp = hf.require_group('output/jobs')

            for n in _vcache:
                if n in dgrp:
                    del dgrp[n]
                adata = _vcache[n]
                if d and len(adata.shape) > 1:
                    # Data is a multidimensional array and we want to do analysis
                    # on each array element individually. So we write them
                    # individually to /output/data
                    numvals = np.prod(adata.shape[1:])
                    for i, index in enumerate(np.ndindex(adata.shape[1:])):
                        name = '%s%s' % (n, [ind for ind in index])
                        data = adata.flatten()[i::numvals]
                        ds = dgrp.create_dataset(name, data=data)
                        ds.attrs["description"] = _dcache[n]
                else:
                    ds = dgrp.create_dataset(n, data=adata)
                    ds.attrs["description"] = str(_dcache[n])
            _vcache = {}
            _dcache = {}

    def _dump_hdf5(self, grp, line, job, mjob):
        """Record one tagged output value for *job* into the module caches."""
        debug("Dump %s : %s" % (job, line))
        global _vcache, _dcache

        # old format used single quotes.
        if line.startswith("{'"):
            line = line.replace("'", '"')

        x = unpickle(line)
        v = x['value']
        n = x['name']
        if n not in _vcache:
            if isinstance(v, ndarray):
                # one row per job; unfilled rows stay NaN
                _vcache[n] = np.empty([mjob] + list(v.shape))
            else:
                _vcache[n] = np.empty((mjob))
            _vcache[n].fill(np.nan)
            _dcache[n] = x['desc']
        _vcache[n][job] = v

    # Extract tagged data to hdf5
    def _extract_hdf5(self, hf, jobs):
        """Parse 'HDF5:{...}:5FDH' tagged lines from job stdout/stderr."""
        debug("Extract")
        mjob = np.max(jobs) + 1
        run_grp = hf.require_group('output/jobs')
        for ext in ['out', 'err']:
            for j in jobs:
                grp = run_grp.require_group(str(j))
                if not 'std%s' % ext in grp:
                    continue
                f = grp['std%s' % ext].value
                cont = False
                for line in f.splitlines():
                    if cont:
                        # continuation of a tag split across lines
                        line = line.strip()
                        cline += line
                        if line.endswith(':5FDH'):
                            cont = False
                            cline = cline[:-5]
                            self._dump_hdf5(grp, cline, j, mjob)
                    elif line.startswith('HDF5:'):
                        line = line[5:].strip()
                        if line.endswith(':5FDH'):
                            line = line[:-5]
                            self._dump_hdf5(grp, line, j, mjob)
                        else:
                            cont = True
                            cline = line
                    elif ext == 'err':
                        print('STDERR[job %d]: %s' % (j, line))
            # flush once per stream: stdout -> /output/data, stderr -> /output/jobs
            self._dump_hdf5_cache(hf, ext == 'out')

    def resume(self):
        """Resume unfinished jobs, then save and analyze."""
        if hasattr(self.host, 'jobs'):
            self.host.run()
            self._save_hdf5()
            self.analyze()
        else:
            print("All jobs finished.")
``` |
[
{
"content": "Here is a code snippet:\n```python\n# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/dialogs/conversion_error.ui'\n#\n# Created: Thu Jul 19 23:32:30 2012\n# by: PyQt4 UI code generator 4.9.1\n#\n# WARNING! All changes made ... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/dialogs/conversion_error.ui'\n#\n# Created: Thu Jul 19 23:32:30 2012\n# by: PyQt4 UI code generator 4.9.1\n#\n# WARNING! A... | ```python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/dialogs/conversion_error.ui'
#
# Created: Thu Jul 19 23:32:30 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Qt's QString.fromUtf8 is unavailable under newer bindings (API v2),
# where plain Python unicode strings are used directly; fall back to identity.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
class Ui_ConversionErrorDialog(object):
    """pyuic-generated UI builder for the conversion-error dialog.

    Generated from conversion_error.ui; regenerate rather than hand-editing.
    """
    def setupUi(self, ConversionErrorDialog):
        """Build the dialog: error icon, message browser and an OK button."""
        ConversionErrorDialog.setObjectName(_fromUtf8("ConversionErrorDialog"))
        ConversionErrorDialog.resize(658, 515)
        icon = QtGui.QIcon()
        # I() presumably resolves a calibre image-resource path -- TODO confirm
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("lt.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        ConversionErrorDialog.setWindowIcon(icon)
        self.gridlayout = QtGui.QGridLayout(ConversionErrorDialog)
        self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
        # error pixmap in the top-left cell
        self.label = QtGui.QLabel(ConversionErrorDialog)
        self.label.setText(_fromUtf8(""))
        self.label.setPixmap(QtGui.QPixmap(_fromUtf8(I("dialog_error.png"))))
        self.label.setObjectName(_fromUtf8("label"))
        self.gridlayout.addWidget(self.label, 0, 0, 1, 1)
        # error text spans two rows in the right column
        self.text = QtGui.QTextBrowser(ConversionErrorDialog)
        self.text.setObjectName(_fromUtf8("text"))
        self.gridlayout.addWidget(self.text, 0, 1, 2, 1)
        # vertical spacer keeps the icon pinned to the top
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridlayout.addItem(spacerItem, 1, 0, 1, 1)
        self.buttonBox = QtGui.QDialogButtonBox(ConversionErrorDialog)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.gridlayout.addWidget(self.buttonBox, 2, 1, 1, 1)
        self.retranslateUi(ConversionErrorDialog)
        # wire the button box to the dialog's accept/reject slots
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), ConversionErrorDialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), ConversionErrorDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(ConversionErrorDialog)

    def retranslateUi(self, ConversionErrorDialog):
        """Apply translatable texts (window title only)."""
        ConversionErrorDialog.setWindowTitle(_("ERROR"))
``` |
[
{
"content": "```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"A simple (improved + 1) ACBT scraper to download course files (by /u/chehanr).\"\"\"\n\nimport getpass\nimport os\nimport urllib\nfrom argparse import ArgumentParser\n\nimport requests\nfrom bs4 import BeautifulSoup, SoupStrainer\... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"A simple (improved + 1) ACBT scraper to download course files (by /u/chehanr).\"\"\"\n\nimport getpass\nimport os\nimport urllib\nfrom argparse import ArgumentParser\n\nimport requests\nfrom bs4 import BeautifulSou... | ```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A simple (improved + 1) ACBT scraper to download course files (by /u/chehanr)."""
import getpass
import os
import subprocess
import urllib
import urllib.request
from argparse import ArgumentParser

import requests
from bs4 import BeautifulSoup, SoupStrainer
CWD = os.getcwd()
class Download:
    """Download resource files with wget, reusing the moodle session cookies.

    :param username: Username (used to build the output path),
    :param subject_name: Subject Name,
    :param week: Week number
    """

    def __init__(self, username, subject_name, week):
        self.username = username
        self.subject_name = subject_name
        self.week = week
        # files are grouped as scrape2/<USER>/<subject>/Week <n>/
        self.path = '%s/scrape2/%s/%s/Week %s/' % (
            CWD, self.username.upper(), self.subject_name, self.week)

    def resource(self, resource_uri, resource_title):
        """Downloading the resource files.

        The command is run without a shell (argument list) so that URIs and
        titles scraped from the site cannot inject shell metacharacters.
        """
        resource_url = 'https://learning.acbt.lk/moodle/mod/resource/%s' % (
            resource_uri)
        cmd = ['wget', '--load-cookies', '%s/cookies.txt' % CWD,
               '--content-disposition', '--show-progress',
               '--progress=bar:force', '-N', '-c', resource_url,
               '-P', self.path]
        proxies = urllib.request.getproxies()
        if proxies:
            cmd += ['-e', 'use_proxy=yes',
                    '-e', 'http_proxy=%s' % proxies.get('http'),
                    '-e', 'https_proxy=%s' % proxies.get('https')]
        subprocess.run(cmd)
class Scrape:
    """Initial scrape.

    :param session: Current session
    """

    def __init__(self, session):
        self.session = session

    def subjects(self):
        """Return a list of (subject_name, subject_url, subject_id) tuples."""
        response = self.session.get('https://learning.acbt.lk/moodle')
        strainer = SoupStrainer(
            'div', attrs={'class': 'block_course_list sideblock'})
        soup = BeautifulSoup(
            response.content, 'lxml', parse_only=strainer)
        subjects_list = []
        # descriptive names instead of the shadowed '_' loop variables
        for content_div in soup.find_all('div', attrs={'class': 'content'}):
            for subject_ul in content_div.find_all('ul', attrs={'class': 'list'}):
                for li_subject in subject_ul.find_all('li'):
                    for subject in li_subject.find_all('div', attrs={'class': 'column c1'}):
                        _subject_name = subject.text
                        _subject_code = subject.find('a')['title']
                        subject_url = subject.find('a')['href']
                        subject_id = subject_url.split('id=', 1)[1]
                        subject_name = '%s (%s)' % (
                            _subject_code.upper(), _subject_name)
                        subjects_list.append(
                            (subject_name, subject_url, subject_id))
        return subjects_list

    def resources(self, subject_id):
        """Return a list of (week, resource_uri, resource_title) tuples."""
        resources_list = []
        week = 0
        params = {'id': subject_id}
        response = self.session.get(
            'https://learning.acbt.lk/moodle/mod/resource/index.php', params=params)
        strainer = SoupStrainer(
            'table', attrs={'class': 'generaltable boxaligncenter'})
        soup = BeautifulSoup(response.content, 'lxml', parse_only=strainer)
        for row in soup.find_all('tr'):
            week_td = row.find_all('td', attrs={'class': 'cell c0'})
            resource_td = row.find_all('td', attrs={'class': 'cell c1'})
            for _week in week_td:
                # the week cell is blank on continuation rows; keep the
                # previous week number in that case (was a bare except)
                try:
                    week = int(_week.get_text().strip())
                except ValueError:
                    pass
            for resource in resource_td:
                resource_uri = resource.find('a')['href']
                resource_title = resource.get_text().strip()
                if 'view.php?id=' in resource_uri:
                    resources_list.append(
                        (week, resource_uri, resource_title))
        return resources_list
def subject_list_display(subjects):
    """Return a printable, numbered listing of the available subjects."""
    lines = ['available subjects:']
    for index, (subject_name, _url, _sid) in enumerate(subjects, start=1):
        lines.append('%s. %s' % (index, subject_name))
    return '\n'.join(lines) + '\n'
def create_cookies_file(session):
    """Write the session cookies to cookies.txt (Netscape format) for wget."""
    moodle_id_expire = None
    for cookie in session.cookies:
        if cookie.name == 'MOODLEID_':
            moodle_id_expire = cookie.expires
    cookie_dict = session.cookies.get_dict()
    cookie_text = 'learning.acbt.lk\tTRUE\t/\tFALSE\t%s\tMOODLEID_\t%s\nlearning.acbt.lk\tTRUE\t/\tFALSE\t0\tMoodleSessionTest\t%s\nlearning.acbt.lk\tTRUE\t/\tTRUE\t0\tNVT\t%s' % (
        moodle_id_expire, cookie_dict.get('MOODLEID_'), cookie_dict.get('MoodleSessionTest'), cookie_dict.get('NVT'))
    with open(CWD + '/cookies.txt', 'w') as f:
        f.write(cookie_text)
def main(username, password, specific_subject, specific_week, list_subjects):
    """Log in to moodle, then list subjects or download their resources.

    :param username: moodle username (prompted for when falsy),
    :param password: moodle password (prompted for when falsy),
    :param specific_subject: comma-separated subject filter, or None,
    :param specific_week: comma-separated week-number filter, or None,
    :param list_subjects: if True, only print the subject list
    """
    if not username:
        username = input('moodle username: ')
    if not password:
        password = getpass.getpass('moodle password (hidden): ')
    try:
        params = {'username': username, 'password': password}
        session = requests.Session()
        session.post('https://learning.acbt.lk/user/login',
                     data=params, proxies=urllib.request.getproxies())
    except Exception as err:
        print(err)
    else:
        scrape = Scrape(session)
        subjects = scrape.subjects()
        # dump cookies so the wget subprocesses can reuse this session
        create_cookies_file(session)
        week_list = []
        if specific_week:
            week_list = [int(item) for item in specific_week.split(',')]
        subject_list = []
        if specific_subject:
            subject_list = [item.strip().upper()
                            for item in specific_subject.split(',')]
        if list_subjects:
            print(subject_list_display(subjects))
        else:
            def _download_resources(resources, subject_name, week_list=None):
                # download everything, or only the weeks in week_list when a
                # week filter was given on the command line
                for resource in resources:
                    week, resource_uri, resource_title = resource
                    download = Download(username, subject_name, week)
                    if specific_week is None:
                        download.resource(resource_uri, resource_title)
                    else:
                        if week in week_list:
                            download.resource(resource_uri, resource_title)
            for subject in subjects:
                subject_name, _, subject_id = subject
                resources = scrape.resources(subject_id)
                if subject_list and week_list:
                    # both filters: matching subjects, matching weeks only
                    for _subject in subject_list:
                        if _subject in subject_name.upper():
                            print('\ndownloading resources from %s in week %s' %
                                  (subject_name, week_list))
                            _download_resources(
                                resources, subject_name, week_list)
                elif subject_list or week_list:
                    if subject_list:
                        # subject filter only: everything from matching subjects
                        for _subject in subject_list:
                            if _subject in subject_name.upper():
                                print('\ndownloading all resources from %s' %
                                      (subject_name))
                                _download_resources(resources, subject_name)
                    elif week_list:
                        # week filter only: matching weeks from every subject
                        print('\ndownloading resources from %s in week %s' %
                              (subject_name, week_list))
                        _download_resources(
                            resources, subject_name, week_list)
                else:
                    # no filters: everything from every subject
                    print('\ndownloading all resources from %s' %
                          (subject_name))
                    _download_resources(resources, subject_name)
def arg_parse():
    """Build the command line parser and return the parsed arguments."""
    parser = ArgumentParser(prog='moodlescrapr',
                            description='ACBT moodle scraper (by chehanr)')
    # the four plain string options share the same shape; register them
    # from a table to avoid repetition
    string_options = (
        ('-u', '--username', 'username', 'moodle username'),
        ('-p', '--password', 'password', 'moodle password'),
        ('-s', '--subject', 'subject',
         'scrape only specific subject (comma separated)'),
        ('-w', '--week', 'week',
         'scrape only specific week number (comma separated)'),
    )
    for short_flag, long_flag, dest, help_text in string_options:
        parser.add_argument(short_flag, long_flag, action='store',
                            dest=dest, help=help_text, required=False)
    parser.add_argument('-l', '--list-subjects', action='store_true',
                        dest='list_subjects', help='list available subjects',
                        required=False)
    return parser.parse_args()
if __name__ == '__main__':
    # parse CLI flags and kick off the scrape/download run
    args = arg_parse()
    main(args.username, args.password,
         args.subject, args.week, args.list_subjects)
``` |
[
{
"content": "```python\nimport os\nimport sys\n\nimport tensorflow as tf\nimport Input\n\nimport os, re\n\nFLAGS = tf.app.flags.FLAGS\nTOWER_NAME = 'tower'\n\ntf.app.flags.DEFINE_integer('batch_size', 1, \"hello\")\n\ndef _activation_summary(x):\n with tf.device('/cpu:0'):\n tensor_name = re.sub('%s_... | [
{
"content": "<|memory_start|>```python\nimport os\nimport sys\n\nimport tensorflow as tf\nimport Input\n\nimport os, re\n\nFLAGS = tf.app.flags.FLAGS\nTOWER_NAME = 'tower'\n\ntf.app.flags.DEFINE_integer('batch_size', 1, \"hello\")\n\ndef _activation_summary(x):\n with tf.device('/cpu:0'):\n tensor_na... | ```python
import os
import sys
import tensorflow as tf
import Input
import os, re
FLAGS = tf.app.flags.FLAGS
TOWER_NAME = 'tower'
tf.app.flags.DEFINE_integer('batch_size', 1, "hello")
def _activation_summary(x):
    """Register histogram and sparsity summaries for activation tensor x."""
    with tf.device('/cpu:0'):
        # drop any "tower_<n>/" prefix so summaries from all towers share a name
        tower_prefix = '%s_[0-9]*/' % TOWER_NAME
        tensor_name = re.sub(tower_prefix, '', x.op.name)
        tf.summary.histogram(tensor_name + '/activations', x)
        tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def inputs():
    """Training images and labels from the Input module, batched per FLAGS.batch_size."""
    images, labels = Input.inputs(batch_size = FLAGS.batch_size)
    return images, labels
def eval_inputs():
    """Evaluation images and labels read from the VALIDATION directory, batch size 1."""
    data_dir = 'VALIDATION'
    images, labels = Input.eval_inputs(data_dir = data_dir, batch_size = 1)
    return images, labels
def weight_variable(name, shape):
    """Create a GPU-resident weight variable, normal-initialized (stddev 0.035).

    Bug fix: `name` was previously passed positionally, which bound it to
    tf.Variable's second parameter (`trainable`) instead of `name`, leaving
    the variable unnamed.
    """
    with tf.device('/gpu:0'):
        initial = tf.random_normal(shape, stddev=0.035)
        var = tf.Variable(initial, name=name)
    return var
def bias_variable(shape):
    """Create a CPU-resident bias variable initialised to a constant 0.1."""
    with tf.device('/cpu:0'):
        return tf.Variable(tf.constant(0.1, shape=shape))
def conv(images, W):
    """2-D convolution of `images` with filters `W`: stride 1, SAME padding."""
    return tf.nn.conv2d(images, W, strides = [1, 1, 1, 1], padding = 'SAME')
def forward_propagation(images, dropout_value):
    """Five conv layers (first three followed by 2x2 max-pooling) and a
    two-layer perceptron with dropout; returns 4-way logits.

    Images are reshaped to (batch, 200, 200, 3); the three stride-2 pools
    reduce 200 -> 100 -> 50 -> 25, which gives the 25*25*15 flatten size.
    """
    with tf.variable_scope("conv1"):
        with tf.device('/gpu:0'):
            conv1_feature = weight_variable('conv1_feature', [11, 11, 3, 10])
            conv1_bias = bias_variable([10])
            image_matrix = tf.reshape(images, [-1, 200, 200, 3])
            conv1_result = tf.nn.relu(conv(image_matrix, conv1_feature) + conv1_bias)
        _activation_summary(conv1_result)
        with tf.device('/cpu:0'):
            # visualize the first-layer filters as images
            kernel_transposed = tf.transpose(conv1_feature, [3, 0, 1, 2])
            tf.summary.image('conv1/filters', kernel_transposed, max_outputs=10)
        conv1_pool = tf.nn.max_pool(conv1_result, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
    with tf.variable_scope("conv2"):
        conv2_feature = weight_variable('conv2_feature', [3, 3, 10, 20])
        conv2_bias = bias_variable([20])
        conv2_result = tf.nn.relu(conv(conv1_pool, conv2_feature) + conv2_bias)
        _activation_summary(conv2_result)
        conv2_pool = tf.nn.max_pool(conv2_result, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
    with tf.variable_scope("conv3"):
        conv3_feature = weight_variable('conv3_feature', [3, 3, 20, 30])
        conv3_bias = bias_variable([30])
        conv3_result = tf.nn.relu(conv(conv2_pool, conv3_feature) + conv3_bias)
        _activation_summary(conv3_result)
        conv3_pool = tf.nn.max_pool(conv3_result, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
    with tf.variable_scope("conv4"):
        # conv4/conv5 are not pooled, so spatial size stays 25x25
        conv4_feature = weight_variable('conv4_feature', [3, 3, 30, 30])
        conv4_bias = bias_variable([30])
        conv4_result = tf.nn.relu(conv(conv3_pool, conv4_feature) + conv4_bias)
    with tf.variable_scope("conv5"):
        conv5_feature = weight_variable('conv5_feature', [3, 3, 30, 15])
        conv5_bias = bias_variable([15])
        conv5_result = tf.nn.relu(conv(conv4_result, conv5_feature) + conv5_bias)
    with tf.variable_scope("fcl"):
        perceptron1_weight = weight_variable('perceptron1_weight', [25 * 25 * 15, 25 * 25 * 15])
        perceptron1_bias = bias_variable([25 * 25 * 15])
        flatten_dense_connect = tf.reshape(conv5_result, [-1, 25 * 25 * 15])
        compute_perceptron1_layer = tf.nn.relu(tf.matmul(flatten_dense_connect, perceptron1_weight) + perceptron1_bias)
        # dropout applied between the two dense layers only
        dropout = tf.nn.dropout(compute_perceptron1_layer, dropout_value)
        _activation_summary(compute_perceptron1_layer)
        perceptron2_weight = weight_variable('perceptron2_weight', [25 * 25 * 15, 4])
        perceptron2_bias = bias_variable([4])
        # raw logits; the loss applies softmax itself
        result1 = tf.matmul(dropout, perceptron2_weight) + perceptron2_bias
        _activation_summary(result1)
    return result1
def error(forward_propagation_results, labels):
with tf.device('/cpu:0'):
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=forward_propagation_results, labels=labels)
cost = tf.reduce_mean(cross_entropy)
tf.add_to_collection('loss', cost)
total_loss = tf.add_n(tf.get_collection('loss'), name='total_loss')
_activation_summary(total_loss)
return total_loss
def train(cost):
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.001
rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100000, 0.95, staircase=True)
train_loss = tf.train.GradientDescentOptimizer(learning_rate = rate).minimize(cost, global_step=global_step)
return train_loss
``` |
[
{
"content": "```python\n# -*- encoding: utf-8 -*-\nfrom django.views.generic import TemplateView\n\nfrom braces.views import (\n LoginRequiredMixin,\n StaffuserRequiredMixin,\n)\n\nfrom base.view_utils import BaseMixin\nfrom block.forms import ContentEmptyForm\nfrom block.views import (\n ContentCreat... | [
{
"content": "<|memory_start|>```python\n# -*- encoding: utf-8 -*-\nfrom django.views.generic import TemplateView\n\nfrom braces.views import (\n LoginRequiredMixin,\n StaffuserRequiredMixin,\n)\n\nfrom base.view_utils import BaseMixin\nfrom block.forms import ContentEmptyForm\nfrom block.views import (\n... | ```python
# -*- encoding: utf-8 -*-
from django.views.generic import TemplateView
from braces.views import (
LoginRequiredMixin,
StaffuserRequiredMixin,
)
from base.view_utils import BaseMixin
from block.forms import ContentEmptyForm
from block.views import (
ContentCreateView,
ContentPublishView,
ContentRemoveView,
ContentUpdateView,
PageTemplateView,
)
from .forms import TitleForm
from .models import (
Title,
TitleBlock,
)
class ExampleView(PageTemplateView):
def get_context_data(self, **kwargs):
context = super(ExampleView, self).get_context_data(**kwargs)
context.update(dict(
calendar=('Jan', 'Feb', 'Mar'),
))
return context
class SettingsView(BaseMixin, TemplateView):
template_name = 'example/settings.html'
class TitleCreateView(
LoginRequiredMixin, StaffuserRequiredMixin, ContentCreateView):
block_class = TitleBlock
form_class = TitleForm
model = Title
template_name = 'example/title_update.html'
class TitleUpdateView(
LoginRequiredMixin, StaffuserRequiredMixin, ContentUpdateView):
form_class = TitleForm
model = Title
template_name = 'example/title_update.html'
class TitlePublishView(
LoginRequiredMixin, StaffuserRequiredMixin, ContentPublishView):
form_class = ContentEmptyForm
model = Title
template_name = 'example/title_publish.html'
class TitleRemoveView(
LoginRequiredMixin, StaffuserRequiredMixin, ContentRemoveView):
form_class = ContentEmptyForm
model = Title
template_name = 'example/title_remove.html'
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 7 16:19:11 2017\n\n@author: psamtik071\n\"\"\"\n\nfrom workflow.data import trip_data\nimport os\n\n\nfor year in xrange(2017,2018):\n for month in xrang... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 7 16:19:11 2017\n\n@author: psamtik071\n\"\"\"\n\nfrom workflow.data import trip_data\nimport os\n\n\nfor year in xrange(2017,2018):\n fo... | ```python
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 16:19:11 2017
@author: psamtik071
"""
from workflow.data import trip_data
import os
for year in xrange(2017,2018):
for month in xrange(1,13):
basepath = 'tripdata/'
to_filename = '{}{:02}-citibike-tripdata.csv'.format(year, month)
path = basepath + to_filename
print "cleaning trips from {}".format(path)
if os.path.exists(to_filename):
print "{} already exists".format(to_filename)
pass
else:
df = pd.read_csv(path)
# rename columns
new_cols = ['duration', 'start_time', 'stop_time', 'start_id', 'start_name',
'start_lat', 'start_long', 'stop_id', 'stop_name', 'stop_lat',
'stop_long', 'bike_id', 'user_type', 'birth_year', 'gender']
df.columns = new_cols
df.start_time = pd.to_datetime(df.start_time, format = '%Y-%m-%d %H:%M:%S')
df.stop_time = pd.to_datetime(df.stop_time, format = '%Y-%m-%d %H:%M:%S')
df.to_csv(to_filename,index = None)
``` |
[
{
"content": "```python\n\"\"\"Event arrays are 2D label arrays (time x ROI) that are generated from an\narray of fluorescent traces of the same size.\n\n Uses the following inequality to determine if an event occured at a specific time in a cell:\n dF/F of cell > (baseline of cell + std_threshold * s... | [
{
"content": "<|memory_start|>```python\n\"\"\"Event arrays are 2D label arrays (time x ROI) that are generated from an\narray of fluorescent traces of the same size.\n\n Uses the following inequality to determine if an event occured at a specific time in a cell:\n dF/F of cell > (baseline of cell + s... | ```python
"""Event arrays are 2D label arrays (time x ROI) that are generated from an
array of fluorescent traces of the same size.
Uses the following inequality to determine if an event occured at a specific time in a cell:
dF/F of cell > (baseline of cell + std_threshold * std of cell * alpha)
See the findEvents() docstring for more info.
These routines are used to create and analyze event arrays. Note that
some of the event utility functions return masked numpy arrays. This
is because generally, there are different number of events in each
cell during each trial. Anywhere there wasn't an event is a 'np.nan'
value, and the mask will ensure that it isn't used to calcuate things
like mean(), min(), max() etc.
"""
import numpy as np
import traces as tm
from sklearn.mixture import GMM
import scipy.ndimage as nd
import mahotas
__all__ = ['findEvents', 'findEventsGMM', 'findEventsBackground',
'getCounts', 'getStartsAndStops', 'getDurations', 'getAvgAmplitudes', 'getWeightedEvents',
'fitGaussianMixture1D', 'getGMMBaselines']
#----------------------------------------EVENT FINDING FUNCTIONS AND WRAPPERS-----------------------------------
def findEvents(traces, stds, std_threshold=2.5, falling_std_threshold=None, baselines=None, boxWidth=3, minimum_length=2, alpha=None):
"""Core event finding routine with flexible syntax.
Uses the following inequality to determine if an event occured at a specific time in a cell:
dF/F of cell > (baseline of cell + std_threshold * std of cell * alpha)
By default, the baseline is 0.0 (the dF/F traces have been baselined). This baseline can be
explicitly specified using the `baselines` parameter. If `baselines` is a 1d array, it is a
global correction value. If `baselines` is exactly the same size as `traces`, the routine
assumes that the baselines have been explicitly specificed across all cells, trials and frames.
If `baselines` is of size (time x trials), then the routine assumes that the basline value has
been determined for the whole population on a trial by trial basis. This is done in the routines
`findEventsBackground` and `findEventsGMM`.
The `alpha` parameter is here for flexibility. It allows for the scaling of the threshold of detection
on a cell by cell, frame by frame basis indepedent of the noise of a cell or it's baseline value.
If specified it must be the exact same size as `traces`. By default it is set to 1.0.
The routine returns an event array exactly the same size as `traces`, where each event is labeled with
a unique number (an integer). The background is labeled with '0'. This can be used in all the utility
routines below.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:param: alpha - optional scaling parameter for adjusting thresholds
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
time, cells, trials = traces.shape
events = np.zeros_like(traces)
# broadcasting of baselines. ends up as time x cells x trials. this is really annoying,
# but relying on numpy to broadcast things was tricky and problembatic. idea here is to
# get baselines identical to traces
if baselines is None: # no baseline correction, default
full_baselines = np.zeros_like(traces)
elif baselines.shape == (time): # one global correction
full_baselines = np.zeros_like(traces)
for trial in range(trials):
for cell in range(cells):
full_baselines[:,cell,trial] = baselines
elif baselines.shape ==(time, cells): # full, but only one trial
full_baselines = baselines[:,:,None]
elif baselines.shape == (time, trials): # modeled on a trial by trial basis
full_baselines = np.zeros_like(traces)
for trial in range(trials):
for cell in range(cells):
full_baselines[:,cell,trial] = baselines[:,trial]
# this is a check to prevent a dip in the global population from calling stuff responders
# basically, if the estimated baseline falls below zero, we fall back to the implicit background
# value of 0.0
full_baselines[full_baselines<0.0] = 0.0
# alpha is a scaling factor for event detection. if used it has to be the same size and shape as traces.
# no broadcasting is done here. it scales the threshold for detection so by default it is 1.0 everywhere.
if alpha is None:
alpha = np.ones_like(full_baselines)
# smooth traces and baselines
if boxWidth is not 0:
traces_smoothed = nd.convolve1d(traces, np.array([1]*boxWidth)/float(boxWidth), axis=0)
baselines_smoothed = nd.convolve1d(full_baselines, np.array([1]*boxWidth)/float(boxWidth), axis=0)
# detect events
for trial in range(trials):
for cell in range(cells):
events[:,cell,trial] = traces_smoothed[:,cell,trial] > baselines_smoothed[:,cell,trial] + (stds[cell, trial] * float(std_threshold) * alpha[:,cell,trial])
# filter for minimum length
events = mahotas.label(events, np.array([1,1])[:,np.newaxis,np.newaxis])[0]
for single_event in range(1, events.max()+1):
if (events == single_event).sum() <= minimum_length:
events[events == single_event] = 0
events = events>0
# if a falling std is specified, extend events until they drop below that threshold
if falling_std_threshold is not None:
for trial in range(trials):
for cell in range(cells):
falling_thresh_events = traces_smoothed[:,cell,trial] > baselines_smoothed[:,cell,trial] + (stds[cell, trial] * float(falling_std_threshold) * alpha[:,cell,trial])
for event_end in np.argwhere(np.diff(events[:,cell,trial].astype(int)) == -1):
j = event_end
while (j<time) and ((events[j,cell,trial]) or (falling_thresh_events[j])):
events[j,cell,trial] = events[j-1,cell,trial]
j = j + 1
# finally label the event array and return it.
events = mahotas.label(events>0, np.array([1,1])[:,np.newaxis,np.newaxis])[0]
return np.squeeze(events)
def findEventsGMM(traces, stds, std_threshold=2.5, falling_std_threshold=None, boxWidth=3, minimum_length=2):
"""Wrapper for findEvents with baseline estimation using a mixture of gaussians model.
The major idea here is to use a mixture of two gaussians to model
the baselines within each trial as a mixture of two gaussians -
one for the 'baseline' and one for all the 'bright' responding
pixels. At each time point, the ROI brightnesses are fit with
with this GMM. The means of the two distributions are initialized
to the background 'cell' and all points brighter than the mean of
all ROIs. After fitting, the smaller of the two means at every
point is taken to be the 'background'. This generally is very
close to the average of the entire frame, but is generally smaller
during full field events, because the larger gaussian 'sucks up'
the spurious bright pixels.
See getGMMBaselines() for more information.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
baselines = getGMMBaselines(traces) # time x trials (one population baseline trace for all cells)
return findEvents(traces, stds, std_threshold, falling_std_threshold, baselines, boxWidth, minimum_length)
def findEventsBackground(traces, stds, std_threshold=2.5, falling_std_threshold=None, boxWidth=3, minimum_length=2):
"""Wrapper for findEvents with baseline estimation using the background..
Here, we estimate the population baseline for all the cells as the
'background cell', or cell 0. It is generally a fair estimation
of the general response of the field of view, but is imperfect due
to segmentation errors.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
baselines = traces[:,0,:].copy() # time x trials (one population baseline trace for all cells)
return findEvents(traces, stds, std_threshold, falling_std_threshold, baselines, boxWidth, minimum_length)
#----------------------------------------EVENT UTILITY FUNCTIONS-----------------------------------
def getStartsAndStops(event_array):
"""This routine takes an event_array and returns the starting and
stopping times for all events in the array.
:param: event_array - 2d or 3d numpy event array (time x cells, or time x cells x trials))
:returns: masked numpy arrays, one for starting times and stopping times.
size is cells x max event number or cells x trials x max event number.
masked array is to account for the variable number of events in each cell
"""
event_array = np.atleast_3d(event_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
starts = np.zeros((cells, trials, int(max_num_events)))
stops = np.zeros((cells, trials, int(max_num_events)))
starts[:] = np.nan
stops[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
starts[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).flatten()[0]
stops[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).flatten()[-1]
starts = np.ma.array(starts, mask=np.isnan(starts))
starts = np.squeeze(starts)
stops = np.ma.array(stops, mask=np.isnan(stops))
stops = np.squeeze(stops)
return starts, stops
def getCounts(event_array, time_range=None):
"""This routine takes an event_array and optionally a time range
and returns the number of events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 1d or 2d numpy array of counts (cells or cells x trials)
"""
if time_range is not None:
event_array = event_array[time_range[0]:time_range[1],:] # note that this works for 2 or 3d arrays...
if event_array.ndim is 2:
event_array = event_array[:,:,np.newaxis]
time, cells, trials = event_array.shape
counts = np.zeros((cells,trials))
for trial in range(trials):
for cell in range(cells):
counts[cell, trial] = np.unique(event_array[:,cell,trial]).size - 1
return np.squeeze(counts)
def getDurations(event_array, time_range=None):
"""This routine takes an event_array (time x cells) and returns
the duration of events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 2d masked numpy array of event durations. size is cells x largest number of events.
masked entries are to account for variable number of events
"""
event_array = np.atleast_3d(event_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
durations = np.zeros((cells, trials, int(max_num_events)))
durations[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
durations[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).size
durations = np.ma.array(durations, mask=np.isnan(durations))
durations = np.squeeze(durations)
return durations
def getAvgAmplitudes(event_array, trace_array, time_range=None):
"""This routine takes an event_array (time x cells) and
corresponding trace array and returns the average amplitudes of
events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 2d masked numpy array of event average amplitudes. size is cells x largest number of events.
masked entries are account for variable number of events
"""
event_array = np.atleast_3d(event_array)
trace_array= np.atleast_3d(trace_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
amps = np.zeros((cells, trials, int(max_num_events)))
amps[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
amps[cell, trial, i] = trace_array[event_array == event_id].mean()
amps = np.ma.array(amps, mask=np.isnan(amps))
amps = np.squeeze(amps)
return np.ma.masked_array(amps, np.isnan(amps))
def getWeightedEvents(event_array, trace_array):
"""This routine takes an event array and corresponding trace array
and replaces the event labels with the average amplitude of the
event.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: trace_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:returns: 2d numpy array same shape and size of event_array, zero where there
weren't events, and the average event amplitude for the event otherwise.
"""
weighted_events = np.zeros_like(event_array, dtype=float)
for i in np.unique(event_array)[1:]:
weighted_events[event_array==i] = trace_array[event_array==i].mean()
return weighted_events
#----------------------------------------GMM UTILITY FUNCTIONS-----------------------------------
def fitGaussianMixture1D(data, n=2, set_mean_priors=True):
"""Routine for fitting a 1d array to a mixture of `n` gaussians.
if 'set_mean_priors' is True (the default), we initialize the GMM
model with means equal to the first point (the 'background' cell)
and all ROIs larger than the mean. Otherwise, we have random means.
After fitting, we return the means, stds, and weights of the GMM,
along with the BIC, AIC, and the model itself.
:param: data - 1d array of data to fit
:param: n - number of gaussians to fit, defaults to 2
:param: set_mean_priors - boolean, if true, initializes the means of a mixture of 2 gaussians
:returns: tuple of (means, stds, weights, BIC, AIC, GMM model object)
"""
if set_mean_priors:
g = GMM(n_components=n, init_params='wc', n_init=5)
g.means_ = np.zeros((n, 1))
g.means_[0,0] = data[0] # first datapoint is the background value... should be near 0.0
g.means_[1,0] = data[data > data[0]].mean()
else:
g = GMM(n_components=n, n_init=5)
g.fit(data)
return (np.squeeze(g.means_.flatten()),
np.squeeze(np.sqrt(g.covars_).flatten()),
np.squeeze(g.weights_).flatten(),
g.bic(data),
g.aic(data),
g)
def getGMMBaselines(traces):
"""Wrapper for fitGaussianMixture1D() for findEventsGMM().
:param: traces - 2 or 3d numpy array of dF/F (time x cells, or time x cells x trials)
:returns: 1 or 2d numpy array of estimated baseline (time or time x trials).
"""
traces = np.atleast_3d(traces) # time x cells x trials
time, cells, trials = traces.shape
gmmBaselines = np.zeros((time, trials)) # one baseline estimation for each trial
for trial in range(trials):
for frame in range(time):
means, stds, weights, bic, aic, model = fitGaussianMixture1D(traces[frame,:,trial], 2)
gmmBaselines[frame, trial] = means.min()
return gmmBaselines
#----------------------------------------DEPRECATED EVENT FINDING FUNCTIONS-----------------------------------
def findEventsAtThreshold(traces, stds, rising_threshold, falling_threshold=0.75, first_mode='rising', second_mode='falling', boxWidth=3, distance_cutoff=2):
"""----------------DEPRECATED-----------------------------
Routine to find events based on the method in Dombeck et al., 2007.
Relies on the multi-dimensional findLevels function in traceRoutines.
Finds all two sets of points in `traces` that cross threshold multiples
of `stds`. The first_mode and second_mode parameters determine if the
crossings are rising, or falling. The trace is filtered with a flat
kernel of width `boxWidth` and successive crossings are paired. Any
crossings less that `distance_cutoff` apart are discarded.
This routine is called by findEventsDombeck().
:param: traces - 2 or 3d numpy array of dF/F traces (time x cells, or time x cells x trial)
:param: stds - 1 or 2d numpy array of values representing noise levels in the data (cells, or cells x trials)
:param: rising_threshold - float used for first crossings
:param: falling_threshold - float used for second crossings
:param: boxWidth - filter size
:param: distance_cutoff - eliminate crossings pairs closer than this- eliminates noise
:returns: 2d or 3d array same size and dimension as traces, labeled with event number
"""
# insure that we have at least one 'trial' dimension.
if traces.ndim == 2:
traces = np.atleast_3d(traces)
stds = np.atleast_2d(stds)
time, cells, trials = traces.shape
# normally tm.findLevels works with a single number, but if the shapes are right then it will broadcast correctly with a larger array
first_crossings = tm.findLevelsNd(traces, np.array(stds)*rising_threshold, mode=first_mode, axis=0, boxWidth=boxWidth)
second_crossings = tm.findLevelsNd(traces, np.array(stds)*falling_threshold, mode=second_mode, axis=0, boxWidth=boxWidth)
events = np.zeros_like(traces)
i=1
for cell in range(cells):
for trial in range(trials):
rising_event_locations = np.where(first_crossings[:,cell,trial])[0] # peel off the tuple
falling_event_locations = np.where(second_crossings[:,cell,trial])[0] # peel off the tuple
possible_pairs = []
for r in rising_event_locations:
if possible_pairs:
prev_rising = zip(*possible_pairs)[0]
prev_falling = zip(*possible_pairs)[1]
if r <= prev_falling[-1]:
continue
try:
f = falling_event_locations[np.searchsorted(falling_event_locations, r)]
possible_pairs.append([r,f])
except IndexError:
possible_pairs.append([r,time])
for pair in possible_pairs:
if pair[1]-pair[0] > distance_cutoff:
events[pair[0]:pair[1], cell, trial] = i
i = i+1
return np.squeeze(events)
def findEventsDombeck(traces, stds, false_positive_rate=0.05, lower_sigma=1, upper_sigma=5, boxWidth=3, distance_cutoff=2):
"""----------------DEPRECATED-----------------------------
This routine uses findEventsAtThreshold() at a range of thresholds to
detect both postive and going events, and calculates a false positive
rate based on the percentage of total negative events
(see Dombeck et al. 2007). It then calculates the threshold closest to
the specificed false postive rate and returns that event array for
positive going events.
The falling value is hardcoded at 0.75 * std of baseline, as per Dombeck et al. 2007.
:param: traces - 2 or 3d numpy array of traces (time x cells or time x cells x trials)
:param: stds - 1 or 2d numpy array of values representing noise levels in the data (cells, or cells x trials)
:param: false_positive_rate - float value of desired false positive rate (0.05 = 5%)
:param: lower_sigma - starting point for scan
:param: upper_sigma - stopping point for scan
:param: boxWidth - window size for pre-smoothing
:param: distance_cutoff - minimum length of event
:returns: events array for traces at desired false positive rate
"""
all_events = []
for sigma in np.arange(lower_sigma, upper_sigma, 0.125):
pos_events = findEventsAtThreshold(traces, stds, sigma, 0.75, first_mode='rising', second_mode='falling', boxWidth=boxWidth, distance_cutoff=distance_cutoff)
neg_events = findEventsAtThreshold(traces, stds, -sigma, -0.75, first_mode='falling', second_mode='rising', boxWidth=boxWidth, distance_cutoff=distance_cutoff)
temp_false_positive_rate = neg_events.max() / (pos_events.max() + neg_events.max())
all_events.append((sigma, pos_events.max(), neg_events.max(), temp_false_positive_rate, pos_events, neg_events))
closest_to_false_pos = np.argmin(np.abs(np.array(zip(*all_events)[3])-false_positive_rate)) # get all false positive rates, find index closest to 0.05
print 'Using sigma cutoff of: ' + str(all_events[closest_to_false_pos][0]) # get the right sigma
return all_events[closest_to_false_pos][4] # pos events are 4th in tuple
``` |
[
{
"content": "```python\nimport sys\n\ntry:\n from django.conf import settings\n\n settings.configure(\n DEBUG=True,\n USE_TZ=True,\n DATABASES={\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n }\n },\n ROOT_URLCONF=\... | [
{
"content": "<|memory_start|>```python\nimport sys\n\ntry:\n from django.conf import settings\n\n settings.configure(\n DEBUG=True,\n USE_TZ=True,\n DATABASES={\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n }\n },\n ... | ```python
import sys
try:
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="dedal.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"dedal",
],
SITE_ID=1,
NOSE_ARGS=['-s'],
MIDDLEWARE_CLASSES=(),
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
from django_nose import NoseTestSuiteRunner
except ImportError:
import traceback
traceback.print_exc()
raise ImportError("To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
test_runner = NoseTestSuiteRunner(verbosity=1)
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(failures)
if __name__ == '__main__':
run_tests(*sys.argv[1:])
``` |
[
{
"content": "Write the code verbatim:\n```python\nOpenness = ['Adventurousness','Artistic interests','Emotionality','Imagination','Intellect','Authority-challenging']\nConscientiousness = ['Achievement striving','Cautiousness','Dutifulness','Orderliness','Self-discipline','Self-efficacy']\nExtraversion = ['Act... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nOpenness = ['Adventurousness','Artistic interests','Emotionality','Imagination','Intellect','Authority-challenging']\nConscientiousness = ['Achievement striving','Cautiousness','Dutifulness','Orderliness','Self-discipline','Self-efficacy']\nExtr... | ```python
Openness = ['Adventurousness','Artistic interests','Emotionality','Imagination','Intellect','Authority-challenging']
Conscientiousness = ['Achievement striving','Cautiousness','Dutifulness','Orderliness','Self-discipline','Self-efficacy']
Extraversion = ['Activity level','Assertiveness','Cheerfulness','Excitement-seeking','Outgoing','Gregariousness']
Agreeableness = ['Altruism','Cooperation','Modesty','Uncompromising','Sympathy','Trust']
EmotionalRange = ['Fiery','Prone to worry','Melancholy','Immoderation','Self-consciousness','Susceptible to stress']
OpAttri = Openness#[ Openness[0] , Openness[2] , Openness[3]]
ConAttri = Conscientiousness#[ Conscientiousness[0] , Conscientiousness[1] , Conscientiousness[2]]
ExtraAttri = Extraversion#[ Extraversion[0] , Extraversion[1] , Extraversion[2]]
AgreeAttri = Agreeableness#[ Agreeableness[1] , Agreeableness[2] , Agreeableness[4]]
EmoAttri = EmotionalRange#[ EmotionalRange[2] , EmotionalRange[4] , EmotionalRange[5]]
personality_traits = ["Openness","Conscientiousness","Extraversion","Agreeableness","Emotional range"]
combinedTraits = [OpAttri,ConAttri,ExtraAttri,AgreeAttri,EmoAttri]
recordLimit = 10
CriticalCount = 3
SelectedAttributes = ['Cautiousness','Melancholy','Self-consciousness','Susceptible to stress','Prone to worry','Authority-challenging','Immoderation','Sympathy','Trust','Cheerfulness']
Constraints = {
'Authority-challenging' : [92.00,100.00],
'Cautiousness' : [93.00,99.00],
'Sympathy' : [0.00,0.15],
'Trust' : [0.00,0.15],
'Prone to worry' : [85.00,100.00],
'Melancholy' : [88.00,100.00],
'Immoderation' : [80.00,100.00],
'Self-consciousness' : [75.00,100.00],
'Susceptible to stress' : [80.00,100.00],
'Cheerfulness' : [0.00,20.00]
}
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\nrun_script = True\n\nimport scribus\nfrom tempfile import NamedTemporaryFile\n\ntry:\n import markdown\nexcept:\n scribus.messageBox('python-markdown not installed',\n 'You need to install python-markdown for this... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\nrun_script = True\n\nimport scribus\nfrom tempfile import NamedTemporaryFile\n\ntry:\n import markdown\nexcept:\n scribus.messageBox('python-markdown not installed',\n 'You need to install python-m... | ```python
run_script = True
import scribus
from tempfile import NamedTemporaryFile
try:
import markdown
except:
scribus.messageBox('python-markdown not installed',
'You need to install python-markdown for this script to work', scribus.ICON_WARNING)
run_script = False
run_script &= bool(scribus.getSelectedObject(0)) # We must have at least one selected object
if run_script and scribus.getSelectedObject(1):
result = scribus.messageBox('', 'More than one item selected, load all?',
button1=scribus.BUTTON_CANCEL, button2=scribus.BUTTON_YES)
if result == scribus.BUTTON_CANCEL:
run_script = False
def main():
md_name = scribus.fileDialog("Select a file", 'Markdown (*.md)')
if not md_name:
return
f = NamedTemporaryFile(suffix='.html')
markdown.markdownFromFile(md_name, f)
f.flush()
html_name = f.name
i = 0
while True:
ob_name = scribus.getSelectedObject(i)
if not ob_name:
break
if scribus.getObjectType(ob_name) == 'TextFrame':
scribus.insertHtmlText(html_name, ob_name)
i += 1
if run_script:
main()
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\nfrom compat import patterns, url\n\n\nurlpatterns = patterns('copywriting',\n url(r'^author/(?P<author>\\w[^/]+)$', 'views.listArticlesByAuthor'),\n url(r'^author/(?P<author>\\w[^/]+)/$', 'views.listArticlesByAuthor', name... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\nfrom compat import patterns, url\n\n\nurlpatterns = patterns('copywriting',\n url(r'^author/(?P<author>\\w[^/]+)$', 'views.listArticlesByAuthor'),\n url(r'^author/(?P<author>\\w[^/]+)/$', 'views.listArticle... | ```python
from compat import patterns, url
urlpatterns = patterns('copywriting',
url(r'^author/(?P<author>\w[^/]+)$', 'views.listArticlesByAuthor'),
url(r'^author/(?P<author>\w[^/]+)/$', 'views.listArticlesByAuthor', name='copywriting_by_author'),
url(r'^tag/(?P<in_tag>\w[^/]+)$', 'views.withTag'),
url(r'^tag/(?P<in_tag>\w[^/]+)/$', 'views.withTag', name='copywriting_by_tag'),
# (r'^(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$', 'views.listBlogEntriesByYearMonthDay'),
url(r'^(?P<requestYear>\d+)/(?P<requestMonth>\d+)/$', 'views.listArticlesByYearMonth', name='copywriting_by_month'),
url(r'^(?P<requestYear>\d+)/$', 'views.listArticlesByYear', name='copywriting_by_year'),
url(r'^(?P<slug>[^\.]+)/$', 'views.showArticle', name='copywriting_article'),
url('^$', 'views.listArticles', name='copywriting_index'),
)
try:
from .feed import blogFeed
urlpatterns += patterns('copywriting',
url(r'^feed\.rss$', blogFeed()),
url(r'^feed/$', blogFeed()),
)
except Exception, ex:
print ex
pass
``` |
[
{
"content": "Here is a code snippet:\n```python\n\"\"\" reports.py\r\nThis file contains report definitions. A report definition is some kind \r\ncallable that should output data in whatever format. Once defined, it \r\nshould be added to the ALL_REPORTS dictionary, where the key is the \r\npublic name used to... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n\"\"\" reports.py\r\nThis file contains report definitions. A report definition is some kind \r\ncallable that should output data in whatever format. Once defined, it \r\nshould be added to the ALL_REPORTS dictionary, where the key is the \r\npub... | ```python
""" reports.py
This file contains report definitions. A report definition is some kind
callable that should output data in whatever format. Once defined, it
should be added to the ALL_REPORTS dictionary, where the key is the
public name used to reference the report, like from the CLI.
Example definition:
def sample_report(avail_info_obj, output_file, args):
avail_info_obj is the AvailabilityInfo instance used to generate the report
output_file is a file-like object that should be used for outputting the
report data.
args is an object containing the user input from the cli (if available)
There is a ReportBase class that assists with handling the args.
"""
from .db import AvailabilityDB
from sqlalchemy import and_
import sys
import logging
ALL_REPORTS = {}
class ReportBase(object):
""" Helper class for report definitions
Basic Usage:
subclasses need to implement the generate method
User Input handling:
This class assists in gather data from user input. When these attributes
are specified, the class will validate the user input and then select
the data from the DB. This will be provided to the generate method.
requires_phone_system - when True, requires that the user specify a
phone_system_id
requires_number_group - when True, requires that the user specify a
number_group_id
Note: the user can specify more than one phone system or number group.
Also, even if the data is not required this will grab the data from the
DB if the objects are specified by the user (useful for when the data is
optional)
"""
requires_phone_system = False
requires_number_group = False
def __init__(self, avail_info_obj, output_file, args):
self.logger = logging.getLogger(__name__)
self.avail_info_obj = avail_info_obj
self.output_file = output_file
self.args = args
phone_systems = self._get_db_rows(
table=AvailabilityDB.phone_system,
args=args,
arg_db_mapping={'system_id': 'id'})
has_errors = False
if self.requires_phone_system and phone_systems is None:
has_errors = True
self.logger.error('Must provide a valid system_id: "-s" or "--system_id"')
elif self.requires_phone_system and not phone_systems:
# here we are checking for an empty list, which means args were
# provided but didn't match any rows
has_errors = True
self.logger.error('No matching phone systems found')
number_groups = self._get_db_rows(
table=AvailabilityDB.number_group,
args=args,
arg_db_mapping={'number_group': 'id'})
if self.requires_number_group and number_groups is None:
has_errors = True
self.logger.error('Must provide a valid number group id: "-g" or "--number_group"')
elif self.requires_number_group and not number_groups:
has_errors = True
self.logger.error('No matching number groups found')
if has_errors:
self.logger.info('Errors encountered, exiting')
sys.exit()
self.generate(phone_systems, number_groups)
def _get_db_rows(self, table, args, arg_db_mapping):
""" Returns a list of rows in table matching data in args
If no applicable arguments are specified in args, returns None
If arguments are specified but no rows are matched, returns an
empty list.
table should be a SQL Alchemy table object
args is a collection of user input from the cli module
arg_db_mappings should be a dict-ish object with a key as the attribute
name in the args collection and the associated value being the DB
column name used for the query
"""
clauses = []
for argname, db_col in arg_db_mapping.items():
v = getattr(args, argname)
if not v:
continue
has_args = True
if type(v) == list:
clauses.append(table.c[db_col].in_(v))
else:
clauses.append(table.c[db_col] == v)
if not clauses:
return None
elif len(clauses) == 1:
clause = clauses[0]
else:
clause = and_(*clauses)
conn = self.avail_info_obj.db.connect()
result = conn.execute(table.select().where(clause))
return result.fetchall()
def write(self, data):
self.output_file.write(data)
def generate(self, phone_systems, number_groups):
raise NotImplemented()
class FakeReport(ReportBase):
def generate(self, phone_systems, number_groups):
self.write('Fake report generation\n\n')
self.write('phone_systems: {}\n'.format(phone_systems))
self.write('number_groups: {}\n'.format(number_groups))
ALL_REPORTS['FakeReport'] = FakeReport
class BatExportReport(ReportBase):
def generate(self, phone_systems, number_groups):
pass
#TODO
ALL_REPORTS['BAT_Export'] = BatExportReport
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nimport requests\n\nfrom .sentry_api_client.api import get_api_instance\n\n\nclass Patrol:\n def __init__(self, sentry_api_token, timeout=None):\n self.headers = {\"Authorization\": \"Bearer {}\".format(sentry_api_token)}\n self... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nimport requests\n\nfrom .sentry_api_client.api import get_api_instance\n\n\nclass Patrol:\n def __init__(self, sentry_api_token, timeout=None):\n self.headers = {\"Authorization\": \"Bearer {}\".format(sentry_api_token... | ```python
import requests
from .sentry_api_client.api import get_api_instance
class Patrol:
def __init__(self, sentry_api_token, timeout=None):
self.headers = {"Authorization": "Bearer {}".format(sentry_api_token)}
self.timeout = timeout
self.api = get_api_instance(sentry_api_token, timeout)
def _fetch_resources(self, endpoint, organization, project):
endpoint = getattr(self.api, endpoint)
method = getattr(endpoint, "list")
resources = method(organization, project)
yield from resources.body
next_link = resources.client_response.links["next"]
while next_link["results"] == "true":
response = requests.get(
next_link["url"], timeout=self.timeout, headers=self.headers
)
yield from response.json()
next_link = response.links["next"]
def events(self, organization, project):
return self._fetch_resources("project_events", organization, project)
def event(self, organization, project, event_id):
return self.api.project_events.fetch(organization, project, event_id).body
def issues(self, organization, project):
return self._fetch_resources("project_issues", organization, project)
def issue(self, issue_id):
return self.api.issues.fetch(issue_id).body
def update_issue(self, issue_id, data):
return self.api.issues.update(issue_id, body=data).body
def projects(self, organization):
return self._fetch_resources("projects", organization, None)
``` |
[
{
"content": "Here is a code snippet:\n```python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2013 - 2015 CoNWeT Lab., Universidad Politécnica de Madrid\n\n# This file belongs to the business-charging-backend\n# of the Business API Ecosystem.\n\n# This program is free software: you can redistribute it and/or mod... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2013 - 2015 CoNWeT Lab., Universidad Politécnica de Madrid\n\n# This file belongs to the business-charging-backend\n# of the Business API Ecosystem.\n\n# This program is free software: you can redistribu... | ```python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 - 2015 CoNWeT Lab., Universidad Politécnica de Madrid
# This file belongs to the business-charging-backend
# of the Business API Ecosystem.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth import logout as django_logout
from django.http import HttpResponseRedirect
from django.conf import settings
from wstore.store_commons.utils.http import build_response
from wstore.store_commons.utils.url import add_slash
class Http403(Exception):
pass
def logout(request):
django_logout(request)
response = None
if settings.PORTALINSTANCE:
# Check if the logout request is originated in a different domain
if 'HTTP_ORIGIN' in request.META:
origin = request.META['HTTP_ORIGIN']
origin = add_slash(origin)
from wstore.views import ACCOUNT_PORTAL_URL, CLOUD_PORTAL_URL, MASHUP_PORTAL_URL, DATA_PORTAL_URL
allowed_origins = [
add_slash(ACCOUNT_PORTAL_URL),
add_slash(CLOUD_PORTAL_URL),
add_slash(MASHUP_PORTAL_URL),
add_slash(DATA_PORTAL_URL)
]
if origin in allowed_origins:
headers = {
'Access-Control-Allow-Origin': origin,
'Access-Control-Allow-Credentials': 'true'
}
response = build_response(request, 200, 'OK', headers=headers)
else:
response = build_response(request, 403, 'Forbidden')
else:
# If using the FI-LAB authentication and it is not a cross domain
# request redirect to the FI-LAB main page
response = build_response(request, 200, 'OK')
# If not using the FI-LAB authentication redirect to the login page
url = '/login?next=/'
response = HttpResponseRedirect(url)
return response
``` |
[
{
"content": "Here is a code file:\n```python\n# Copyright (c) 2014 Artem Rozumenko (artyom.rozumenko@gmail.com)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ht... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# Copyright (c) 2014 Artem Rozumenko (artyom.rozumenko@gmail.com)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License... | ```python
# Copyright (c) 2014 Artem Rozumenko (artyom.rozumenko@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource tools commands."""
from threading import Thread, current_thread, active_count
from time import time, sleep
from random import randrange
from math import sqrt
from multiprocessing import Process, cpu_count, current_process
from os import urandom, remove
from glob import glob
from tempfile import gettempdir
from os.path import join as join_path
from psutil import swap_memory
from locust.common import IS_WINDOWS
from locust.common import message_wrapper, convert_timeout
def burn_cpu(timeout=30):
"""Burn CPU command.
Start processes with random int.
Arguments:
:timeout - length of time in seconds to burn cpu (Default: 30 sec)
Return:
feedback
"""
timeout = convert_timeout(timeout, def_timeout=30)
for _ in range(cpu_count()):
thread = Process(target=_burn_cpu, args=[timeout])
thread.start()
return message_wrapper('CPU burning started')
def _burn_cpu(timeout=0):
"""Burn CPU command."""
end_time = time() + timeout
while time() < end_time:
sqrt(float(randrange(1, 999999, 1)))
def burn_ram(timeout=30):
"""RAM overflow command.
Fill Ram with garbage data
Arguments:
:timeout - length of time in seconds to burn cpu (Default: 30 sec)
Return:
feedback
"""
timeout = convert_timeout(timeout, def_timeout=30)
process = Process(target=_burn_ram, args=[timeout])
process.start()
return message_wrapper('RAM overflowing has been started')
def _burn_ram(timeout):
"""RAM overflow command."""
f_ratio = 100
d_ratio = f_ratio
fill_ram = ''
decrease = ''
spike = ''
# Start RAM overflow
# Try to fill all free RAM space
while True:
try:
fill_ram = ' ' * int((float(swap_memory().free) / 100) * f_ratio)
break
except (MemoryError, OverflowError):
f_ratio -= 1
# Try to fill all left free RAM space (Windows OS specific)
while True:
try:
decrease = ' ' * int((float(swap_memory().free) / 100) * d_ratio)
break
except (MemoryError, OverflowError):
d_ratio -= 1
end_time = time() + timeout
while time() < end_time:
if float(swap_memory().percent) < 90:
try:
spike += ' ' * int((float(swap_memory().free) / 100) * 10)
except (MemoryError, OverflowError):
spike = ''
del fill_ram
del decrease
del spike
def burn_disk(timeout=30, file_size='1k', thread_limit='200'):
"""Burn HDD command.
Arguments:
timeout - length of time in seconds to burn HDD (Default: 30 sec);
file_size - file size to be created in thread;
thread_limit - thread limit count per process;
Return:
Returns message that burn HDD is started.
"""
timeout = convert_timeout(timeout, def_timeout=30)
values = {
'B': 0,
'K': 10,
'M': 20,
}
if file_size.isdigit():
count = file_size
rate = 'B'
else:
rate = file_size[-1:].upper()
count = file_size[:-1]
if not (rate in values and count.isdigit()):
mgs = ('Wrong format of file_size param "{param}". "file_size" '
'Parameter should have the following format:'
'"<size_in_digit><Multiplifier>". Correct values for '
'multiplifier is - {mult}')
keys = values.keys() + [k.lower() for k in values.keys()]
raise TypeError(mgs.format(param=file_size, mult=' '.join(keys)))
if not thread_limit.isdigit():
raise TypeError('Thread limit parameter should have the following '
'format:"<count_in_digit>"')
file_size = int(int(count) << values[rate])
end_time = time() + timeout
for _ in xrange(cpu_count()):
process = Process(target=_burn_disk,
args=[end_time, file_size, int(thread_limit)])
process.start()
return message_wrapper('HDD burning has been started')
def _burn_disk(end_time, file_size, thread_limit):
"""Burn HDD command."""
def _start_write():
"""Write data to temp file."""
while time() < end_time:
file_name = current_process().name + '_' + current_thread().name
file_name = join_path(gettempdir(), file_name)
try:
open_file = open(file_name, 'w')
open_file.write(str(urandom(file_size)))
except IOError:
pass
finally:
open_file.close()
if IS_WINDOWS:
overall_file_limit = 16000
else:
import resource
overall_file_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
thread_count = overall_file_limit / cpu_count()
if thread_count > thread_limit:
thread_count = thread_limit
was_threads = active_count()
for _ in xrange(thread_count):
thread = Thread(target=_start_write)
thread.start()
while active_count() > was_threads:
sleep(1)
for each in glob(join_path(gettempdir(), current_process().name + '*')):
remove(each)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\nfrom sqlalchemy import Column, String, Integer, DateTime, PickleType, Boolean\nfrom mcmeAPI.db import Base\n\nclass User(Base):\n __tablename__ = 'users'\n\n name = Column(String(20), primary_key=True)\n group = Column(String(15)) #ran... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nfrom sqlalchemy import Column, String, Integer, DateTime, PickleType, Boolean\nfrom mcmeAPI.db import Base\n\nclass User(Base):\n __tablename__ = 'users'\n\n name = Column(String(20), primary_key=True)\n group = Column(... | ```python
from sqlalchemy import Column, String, Integer, DateTime, PickleType, Boolean
from mcmeAPI.db import Base
class User(Base):
__tablename__ = 'users'
name = Column(String(20), primary_key=True)
group = Column(String(15)) #rank
updated = Column(DateTime())
ob = Column(Boolean)
staff = Column(Boolean)
permissions = Column(PickleType)
worlds = Column(PickleType)
@property
def serialize(self):
return {'group':self.group,
'name':self.name,
'ob':self.ob,
'staff':self.staff,
'permissions':self.permissions,
'worlds':self.worlds,
'updated':dump_datetime(self.updated)
}
def __repr__(self):
return '<User %r>' % (self.name)
class Server(Base):
__tablename__ = 'servers'
name = Column(String(15), primary_key=True)
status = Column(String(10))
players = Column(PickleType)
maxplayers = Column(Integer)
num_players = Column(Integer)
plugins = Column(PickleType)
updated = Column(DateTime())
@property
def serialize(self):
return ({'name': self.name,
'status': self.status,
'players':self.players,
'num_players':self.num_players,
'maxplayers':self.maxplayers,
'plugins':self.plugins,
'updated':dump_datetime(self.updated)})
def dump_datetime(value):
"""Deserialize datetime object into string form for JSON processing."""
if value is None:
return None
return value.strftime("%Y-%m-%dT%H:%M:%S")
``` |
[
{
"content": "Here is the script:\n```python\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#... | ```python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image pre-processing utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
import cnn_util
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.framework import function
from tensorflow.python.layers import utils
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import gfile
import mlperf
def parse_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields:
image/height: 462
image/width: 581
image/colorspace: 'RGB'
image/channels: 3
image/class/label: 615
image/class/synset: 'n03623198'
image/class/text: 'knee pad'
image/object/bbox/xmin: 0.1
image/object/bbox/xmax: 0.9
image/object/bbox/ymin: 0.2
image/object/bbox/ymax: 0.6
image/object/bbox/label: 615
image/format: 'JPEG'
image/filename: 'ILSVRC2012_val_00041207.JPEG'
image/encoded: <JPEG encoded string>
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
text: Tensor tf.string containing the human-readable label.
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return features['image/encoded'], label, bbox, features['image/class/text']
_RESIZE_METHOD_MAP = {
'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR,
'bilinear': tf.image.ResizeMethod.BILINEAR,
'bicubic': tf.image.ResizeMethod.BICUBIC,
'area': tf.image.ResizeMethod.AREA
}
def get_image_resize_method(resize_method, batch_position=0):
"""Get tensorflow resize method.
If resize_method is 'round_robin', return different methods based on batch
position in a round-robin fashion. NOTE: If the batch size is not a multiple
of the number of methods, then the distribution of methods will not be
uniform.
Args:
resize_method: (string) nearest, bilinear, bicubic, area, or round_robin.
batch_position: position of the image in a batch. NOTE: this argument can
be an integer or a tensor
Returns:
one of resize type defined in tf.image.ResizeMethod.
"""
if resize_method != 'round_robin':
return _RESIZE_METHOD_MAP[resize_method]
# return a resize method based on batch position in a round-robin fashion.
resize_methods = list(_RESIZE_METHOD_MAP.values())
def lookup(index):
return resize_methods[index]
def resize_method_0():
return utils.smart_cond(batch_position % len(resize_methods) == 0,
lambda: lookup(0), resize_method_1)
def resize_method_1():
return utils.smart_cond(batch_position % len(resize_methods) == 1,
lambda: lookup(1), resize_method_2)
def resize_method_2():
return utils.smart_cond(batch_position % len(resize_methods) == 2,
lambda: lookup(2), lambda: lookup(3))
# NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here
# because TF would not be able to construct a finite graph.
return resize_method_0()
def decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32):
"""Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
"""
# with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
# with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
with tf.name_scope(scope or 'decode_jpeg'):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image = tf.image.decode_jpeg(image_buffer, channels=3,
fancy_upscaling=False,
dct_method='INTEGER_FAST')
# image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
return image
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
def normalized_image(images):
# Rescale from [0, 255] to [0, 2]
images = tf.multiply(images, 1. / 127.5)
# Rescale to [-1, 1]
mlperf.logger.log(key=mlperf.tags.INPUT_MEAN_SUBTRACTION, value=[1.0] * 3)
return tf.subtract(images, 1.0)
def eval_image(image,
height,
width,
batch_position,
resize_method,
summary_verbosity=0):
"""Get the image for model evaluation.
We preprocess the image simiarly to Slim, see
https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/vgg_preprocessing.py
Validation images do not have bounding boxes, so to crop the image, we first
resize the image such that the aspect ratio is maintained and the resized
height and width are both at least 1.145 times `height` and `width`
respectively. Then, we do a central crop to size (`height`, `width`).
Args:
image: 3-D float Tensor representing the image.
height: The height of the image that will be returned.
width: The width of the image that will be returned.
batch_position: position of the image in a batch, which affects how images
are distorted and resized. NOTE: this argument can be an integer or a
tensor
resize_method: one of the strings 'round_robin', 'nearest', 'bilinear',
'bicubic', or 'area'.
summary_verbosity: Verbosity level for summary ops. Pass 0 to disable both
summaries and checkpoints.
Returns:
An image of size (output_height, output_width, 3) that is resized and
cropped as described above.
"""
# TODO(reedwm): Currently we resize then crop. Investigate if it's faster to
# crop then resize.
with tf.name_scope('eval_image'):
if summary_verbosity >= 3:
tf.summary.image(
'original_image', tf.expand_dims(image, 0))
shape = tf.shape(image)
image_height = shape[0]
image_width = shape[1]
image_height_float = tf.cast(image_height, tf.float32)
image_width_float = tf.cast(image_width, tf.float32)
# This value is chosen so that in resnet, images are cropped to a size of
# 256 x 256, which matches what other implementations do. The final image
# size for resnet is 224 x 224, and floor(224 * 1.145) = 256.
scale_factor = 1.145
# Compute resize_height and resize_width to be the minimum values such that
# 1. The aspect ratio is maintained (i.e. resize_height / resize_width is
# image_height / image_width), and
# 2. resize_height >= height * `scale_factor`, and
# 3. resize_width >= width * `scale_factor`
max_ratio = tf.maximum(height / image_height_float,
width / image_width_float)
resize_height = tf.cast(image_height_float * max_ratio * scale_factor,
tf.int32)
resize_width = tf.cast(image_width_float * max_ratio * scale_factor,
tf.int32)
mlperf.logger.log_input_resize_aspect_preserving(height, width,
scale_factor)
# Resize the image to shape (`resize_height`, `resize_width`)
image_resize_method = get_image_resize_method(resize_method, batch_position)
distorted_image = tf.image.resize_images(image,
[resize_height, resize_width],
image_resize_method,
align_corners=False)
# Do a central crop of the image to size (height, width).
# MLPerf requires us to log (height, width) with two different keys.
mlperf.logger.log(key=mlperf.tags.INPUT_CENTRAL_CROP, value=[height, width])
mlperf.logger.log(key=mlperf.tags.INPUT_RESIZE, value=[height, width])
total_crop_height = (resize_height - height)
crop_top = total_crop_height // 2
total_crop_width = (resize_width - width)
crop_left = total_crop_width // 2
distorted_image = tf.slice(distorted_image, [crop_top, crop_left, 0],
[height, width, 3])
distorted_image.set_shape([height, width, 3])
if summary_verbosity >= 3:
tf.summary.image(
'cropped_resized_image', tf.expand_dims(distorted_image, 0))
image = distorted_image
return image
def train_image(image_buffer,
height,
width,
bbox,
batch_position,
resize_method,
distortions,
scope=None,
summary_verbosity=0,
distort_color_in_yiq=False,
fuse_decode_and_crop=False):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not effect the label.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
batch_position: position of the image in a batch, which affects how images
are distorted and resized. NOTE: this argument can be an integer or a
tensor
resize_method: round_robin, nearest, bilinear, bicubic, or area.
distortions: If true, apply full distortions for image colors.
scope: Optional scope for op_scope.
summary_verbosity: Verbosity level for summary ops. Pass 0 to disable both
summaries and checkpoints.
distort_color_in_yiq: distort color of input images in YIQ space.
fuse_decode_and_crop: fuse the decode/crop operation.
Returns:
3-D float Tensor of distorted image used for training.
"""
# with tf.op_scope([image, height, width, bbox], scope, 'distort_image'):
# with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
with tf.name_scope(scope or 'distort_image'):
# A large fraction of image datasets contain a human-annotated bounding box
# delineating the region of the image containing the object of interest. We
# choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
min_object_covered = 0.1
aspect_ratio_range = [0.75, 1.33]
area_range = [0.05, 1.0]
max_attempts = 100
mlperf.logger.log(key=mlperf.tags.INPUT_DISTORTED_CROP_MIN_OBJ_COV,
value=min_object_covered)
mlperf.logger.log(key=mlperf.tags.INPUT_DISTORTED_CROP_RATIO_RANGE,
value=aspect_ratio_range)
mlperf.logger.log(key=mlperf.tags.INPUT_DISTORTED_CROP_AREA_RANGE,
value=area_range)
mlperf.logger.log(key=mlperf.tags.INPUT_DISTORTED_CROP_MAX_ATTEMPTS,
value=max_attempts)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.image.extract_jpeg_shape(image_buffer),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
if summary_verbosity >= 3:
image = tf.image.decode_jpeg(image_buffer, channels=3,
dct_method='INTEGER_FAST')
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distort_bbox)
tf.summary.image(
'images_with_distorted_bounding_box',
image_with_distorted_box)
# Crop the image to the specified bounding box.
if fuse_decode_and_crop:
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(
image_buffer, crop_window, channels=3)
else:
image = tf.image.decode_jpeg(image_buffer, channels=3,
dct_method='INTEGER_FAST')
image = tf.slice(image, bbox_begin, bbox_size)
mlperf.logger.log(key=mlperf.tags.INPUT_RANDOM_FLIP)
distorted_image = tf.image.random_flip_left_right(image)
# This resizing operation may distort the images because the aspect
# ratio is not respected.
mlperf.logger.log(key=mlperf.tags.INPUT_RESIZE, value=[height, width])
image_resize_method = get_image_resize_method(resize_method, batch_position)
distorted_image = tf.image.resize_images(
distorted_image, [height, width],
image_resize_method,
align_corners=False)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([height, width, 3])
if summary_verbosity >= 3:
tf.summary.image('cropped_resized_maybe_flipped_image',
tf.expand_dims(distorted_image, 0))
if distortions:
distorted_image = tf.cast(distorted_image, dtype=tf.float32)
# Images values are expected to be in [0,1] for color distortion.
distorted_image /= 255.
# Randomly distort the colors.
distorted_image = distort_color(distorted_image, batch_position,
distort_color_in_yiq=distort_color_in_yiq)
# Note: This ensures the scaling matches the output of eval_image
distorted_image *= 255
if summary_verbosity >= 3:
tf.summary.image(
'final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image
def distort_color(image, batch_position=0, distort_color_in_yiq=False,
                  scope=None):
  """Distort the color of the image.

  Each color distortion is non-commutative and thus ordering of the color ops
  matters. Ideally we would randomly permute the ordering of the color ops.
  Rather then adding that level of complication, we select a distinct ordering
  of color ops based on the position of the image in a batch.

  Args:
    image: float32 Tensor containing single image. Tensor values should be in
      range [0, 1].
    batch_position: the position of the image in a batch. NOTE: this argument
      can be an integer or a tensor
    distort_color_in_yiq: distort color of input images in YIQ space.
    scope: Optional scope for op_scope.
  Returns:
    color-distorted image
  """
  if distort_color_in_yiq:
    # random_hsv_in_yiq lives in tf.contrib, which only exists in TF1; under
    # TF2 the import fails, so surface an actionable flag-level error instead
    # of a raw ImportError.
    try:
      from tensorflow.contrib.image.python.ops import distort_image_ops  # pylint: disable=g-import-not-at-top
    except ImportError:
      raise ValueError(
          'In TF2, you cannot pass --distortions unless you also pass '
          '--nodistort_color_in_yiq. This is because the random_hsv_in_yiq was '
          'removed in TF2. --distortions does not improve accuracy on resnet '
          'so it is not recommended. --nodistort_color_in_yiq also has no '
          'impact on accuracy, but may hurt performance.')
  with tf.name_scope(scope or 'distort_color'):

    def distort_fn_0(image=image):
      """Variant 0 of distort function: brightness, saturation/hue, contrast."""
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      if distort_color_in_yiq:
        image = distort_image_ops.random_hsv_in_yiq(
            image, lower_saturation=0.5, upper_saturation=1.5,
            max_delta_hue=0.2 * math.pi)
      else:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      return image

    def distort_fn_1(image=image):
      """Variant 1 of distort function: brightness, contrast, saturation/hue."""
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      if distort_color_in_yiq:
        image = distort_image_ops.random_hsv_in_yiq(
            image, lower_saturation=0.5, upper_saturation=1.5,
            max_delta_hue=0.2 * math.pi)
      else:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.2)
      return image

    # Alternate between the two orderings by batch position. batch_position
    # may be a tensor, hence smart_cond rather than a plain Python `if`.
    image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0,
                             distort_fn_1)
    # The random_* ops do not necessarily clamp.
    image = tf.clip_by_value(image, 0.0, 1.0)
    return image
class InputPreprocessor(object):
  """Base class for all model preprocessors.

  Subclasses implement minibatch() (and optionally the dataset-based methods
  guarded by supports_datasets()) to feed input tensors to the benchmark.
  """

  def __init__(self, batch_size, output_shapes):
    # batch_size: global batch size across all splits/devices.
    # output_shapes: list of per-output shapes, each including the batch dim.
    self.batch_size = batch_size
    self.output_shapes = output_shapes

  def supports_datasets(self):
    """Whether this preprocessor supports dataset."""
    return False

  def minibatch(self, dataset, subset, params, shift_ratio=-1):
    """Returns tensors representing a minibatch of all the input."""
    raise NotImplementedError('Must be implemented by subclass.')

  # The methods added below are only supported/used if supports_datasets()
  # returns True.
  # TODO(laigd): refactor benchmark_cnn.py and put the logic of
  # _build_input_processing() into InputPreprocessor.

  def parse_and_preprocess(self, value, batch_position):
    """Function to parse and preprocess an Example proto in input pipeline."""
    raise NotImplementedError('Must be implemented by subclass.')

  # TODO(laigd): figure out how to remove these parameters, since the
  # preprocessor itself has self.batch_size, self.num_splits, etc defined.
  def build_multi_device_iterator(self, batch_size, num_splits, cpu_device,
                                  params, gpu_devices, dataset, doing_eval):
    """Creates a MultiDeviceIterator.

    Builds the input dataset on cpu_device and returns an iterator that
    prefetches one split per entry of gpu_devices. Requires
    supports_datasets() and one split per GPU device.
    """
    assert self.supports_datasets()
    assert num_splits == len(gpu_devices)
    with tf.name_scope('batch_processing'):
      if doing_eval:
        subset = 'validation'
      else:
        subset = 'train'
      batch_size_per_split = batch_size // num_splits
      ds = self.create_dataset(
          batch_size,
          num_splits,
          batch_size_per_split,
          dataset,
          subset,
          train=(not doing_eval),
          datasets_repeat_cached_sample=params.datasets_repeat_cached_sample,
          num_threads=params.datasets_num_private_threads,
          datasets_use_caching=params.datasets_use_caching,
          datasets_parallel_interleave_cycle_length=(
              params.datasets_parallel_interleave_cycle_length),
          datasets_sloppy_parallel_interleave=(
              params.datasets_sloppy_parallel_interleave),
          datasets_parallel_interleave_prefetch=(
              params.datasets_parallel_interleave_prefetch))
      multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
          ds,
          gpu_devices,
          source_device=cpu_device,
          max_buffer_size=params.multi_device_iterator_max_buffer_size)
      # Piggyback on the TABLE_INITIALIZERS collection so the iterator is
      # initialized along with the other initializers the benchmark runs.
      tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                           multi_device_iterator.initializer)
      return multi_device_iterator

  def create_dataset(self,
                     batch_size,
                     num_splits,
                     batch_size_per_split,
                     dataset,
                     subset,
                     train,
                     datasets_repeat_cached_sample,
                     num_threads=None,
                     datasets_use_caching=False,
                     datasets_parallel_interleave_cycle_length=None,
                     datasets_sloppy_parallel_interleave=False,
                     datasets_parallel_interleave_prefetch=None):
    """Creates a dataset for the benchmark."""
    raise NotImplementedError('Must be implemented by subclass.')

  def create_iterator(self, ds):
    """Wraps ds in an initializable iterator registered for initialization."""
    ds_iterator = tf.data.make_initializable_iterator(ds)
    # Same trick as above: reuse TABLE_INITIALIZERS to get the iterator
    # initialized automatically.
    tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                         ds_iterator.initializer)
    return ds_iterator

  def minibatch_fn(self, batch_size, model_input_shapes, num_splits,
                   dataset, subset, train, datasets_repeat_cached_sample,
                   num_threads, datasets_use_caching,
                   datasets_parallel_interleave_cycle_length,
                   datasets_sloppy_parallel_interleave,
                   datasets_parallel_interleave_prefetch):
    """Returns a function and list of args for the fn to create a minibatch."""
    assert self.supports_datasets()
    batch_size_per_split = batch_size // num_splits
    # model_input_shapes[0][0] is the per-split batch dimension; it must agree
    # with the split size derived from the global batch size.
    assert batch_size_per_split == model_input_shapes[0][0]
    with tf.name_scope('batch_processing'):
      ds = self.create_dataset(batch_size, num_splits, batch_size_per_split,
                               dataset, subset, train,
                               datasets_repeat_cached_sample, num_threads,
                               datasets_use_caching,
                               datasets_parallel_interleave_cycle_length,
                               datasets_sloppy_parallel_interleave,
                               datasets_parallel_interleave_prefetch)
      ds_iterator = self.create_iterator(ds)
      ds_iterator_string_handle = ds_iterator.string_handle()

      # Defun-wrapped so the iterator handle can be passed into the function
      # as a string tensor and resolved back to an iterator inside.
      @function.Defun(tf.string)
      def _fn(h):
        remote_iterator = tf.data.Iterator.from_string_handle(
            h, ds_iterator.output_types, ds_iterator.output_shapes)
        input_list = remote_iterator.get_next()
        reshaped_input_list = [
            tf.reshape(input_list[i], shape=model_input_shapes[i])
            for i in range(len(input_list))
        ]
        return reshaped_input_list

      return _fn, [ds_iterator_string_handle]
class BaseImagePreprocessor(InputPreprocessor):
  """Base class for all image model preprocessors."""

  def __init__(self,
               batch_size,
               output_shapes,
               num_splits,
               dtype,
               train,
               distortions,
               resize_method,
               shift_ratio=-1,
               summary_verbosity=0,
               distort_color_in_yiq=True,
               fuse_decode_and_crop=True,
               match_mlperf=False):
    """Initializes the preprocessor.

    Raises:
      ValueError: if batch_size is not a multiple of num_splits.
    """
    super(BaseImagePreprocessor, self).__init__(batch_size, output_shapes)
    image_shape = output_shapes[0]
    # image_shape is in form (batch_size, height, width, depth)
    self.height = image_shape[1]
    self.width = image_shape[2]
    self.depth = image_shape[3]
    self.num_splits = num_splits
    self.dtype = dtype
    self.train = train
    self.resize_method = resize_method
    self.shift_ratio = shift_ratio
    self.distortions = distortions
    self.distort_color_in_yiq = distort_color_in_yiq
    self.fuse_decode_and_crop = fuse_decode_and_crop
    if self.batch_size % self.num_splits != 0:
      raise ValueError(
          ('batch_size must be a multiple of num_splits: '
           'batch_size %d, num_splits: %d') %
          (self.batch_size, self.num_splits))
    self.batch_size_per_split = self.batch_size // self.num_splits
    self.summary_verbosity = summary_verbosity
    self.match_mlperf = match_mlperf

  def parse_and_preprocess(self, value, batch_position):
    """Parses an Example proto and preprocesses the image it contains."""
    assert self.supports_datasets()
    image_buffer, label_index, bbox, _ = parse_example_proto(value)
    if self.match_mlperf:
      # MLPerf reference does not use groundtruth boxes for cropping; feed an
      # empty bbox tensor so random crop ignores them.
      bbox = tf.zeros((1, 0, 4), dtype=bbox.dtype)
      mlperf.logger.log(key=mlperf.tags.INPUT_CROP_USES_BBOXES, value=False)
    else:
      mlperf.logger.log(key=mlperf.tags.INPUT_CROP_USES_BBOXES, value=True)
    image = self.preprocess(image_buffer, bbox, batch_position)
    return (image, label_index)

  def preprocess(self, image_buffer, bbox, batch_position):
    """Decodes and preprocesses one encoded image; subclass responsibility."""
    raise NotImplementedError('Must be implemented by subclass.')

  def create_dataset(self,
                     batch_size,
                     num_splits,
                     batch_size_per_split,
                     dataset,
                     subset,
                     train,
                     datasets_repeat_cached_sample,
                     num_threads=None,
                     datasets_use_caching=False,
                     datasets_parallel_interleave_cycle_length=None,
                     datasets_sloppy_parallel_interleave=False,
                     datasets_parallel_interleave_prefetch=None):
    """Creates a dataset for the benchmark."""
    assert self.supports_datasets()
    glob_pattern = dataset.tf_record_pattern(subset)
    file_names = gfile.Glob(glob_pattern)
    if not file_names:
      raise ValueError('Found no files in --data_dir matching: {}'
                       .format(glob_pattern))
    ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=train)
    ds = ds.apply(
        tf.data.experimental.parallel_interleave(
            tf.data.TFRecordDataset,
            cycle_length=datasets_parallel_interleave_cycle_length or 10,
            sloppy=datasets_sloppy_parallel_interleave,
            prefetch_input_elements=datasets_parallel_interleave_prefetch))
    if datasets_repeat_cached_sample:
      # Repeat a single sample element indefinitely to emulate memory-speed IO.
      ds = ds.take(1).cache().repeat()
    counter = tf.data.Dataset.range(batch_size)
    counter = counter.repeat()
    # Zip with a repeating counter so downstream map functions receive a
    # (record, position-in-batch) pair.
    ds = tf.data.Dataset.zip((ds, counter))
    ds = ds.prefetch(buffer_size=batch_size)
    if datasets_use_caching:
      ds = ds.cache()
    if train:
      buffer_size = 10000
      mlperf.logger.log(key=mlperf.tags.INPUT_SHARD, value=buffer_size)
      ds = ds.apply(
          tf.data.experimental.shuffle_and_repeat(buffer_size=buffer_size))
    else:
      ds = ds.repeat()
    ds = ds.apply(
        tf.data.experimental.map_and_batch(
            map_func=self.parse_and_preprocess,
            batch_size=batch_size_per_split,
            num_parallel_batches=num_splits))
    ds = ds.prefetch(buffer_size=num_splits)
    if num_threads:
      # Run the input pipeline on a private threadpool of the requested size.
      options = tf.data.Options()
      options.experimental_threading.private_threadpool_size = num_threads
      ds = ds.with_options(options)
    return ds
class RecordInputImagePreprocessor(BaseImagePreprocessor):
  """Preprocessor for images with RecordInput format."""

  def preprocess(self, image_buffer, bbox, batch_position):
    """Preprocessing image_buffer as a function of its batch position."""
    if self.train:
      image = train_image(image_buffer, self.height, self.width, bbox,
                          batch_position, self.resize_method, self.distortions,
                          None, summary_verbosity=self.summary_verbosity,
                          distort_color_in_yiq=self.distort_color_in_yiq,
                          fuse_decode_and_crop=self.fuse_decode_and_crop)
    else:
      image = tf.image.decode_jpeg(
          image_buffer, channels=3, dct_method='INTEGER_FAST')
      image = eval_image(image, self.height, self.width, batch_position,
                         self.resize_method,
                         summary_verbosity=self.summary_verbosity)
    # Note: image is now float32 [height,width,3] with range [0, 255]
    # image = tf.cast(image, tf.uint8) # HACK TESTING
    if self.match_mlperf:
      # MLPerf normalization: subtract per-channel means only (no stddev
      # division).
      mlperf.logger.log(key=mlperf.tags.INPUT_MEAN_SUBTRACTION,
                        value=_CHANNEL_MEANS)
      normalized = image - _CHANNEL_MEANS
    else:
      normalized = normalized_image(image)
    return tf.cast(normalized, self.dtype)

  def minibatch(self,
                dataset,
                subset,
                params,
                shift_ratio=-1):
    """Builds per-split image and label tensors for one minibatch."""
    if shift_ratio < 0:
      # Negative means "not overridden"; fall back to the constructor value.
      shift_ratio = self.shift_ratio
    with tf.name_scope('batch_processing'):
      # Build final results per split.
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      if params.use_datasets:
        ds = self.create_dataset(
            self.batch_size, self.num_splits, self.batch_size_per_split,
            dataset, subset, self.train,
            datasets_repeat_cached_sample=params.datasets_repeat_cached_sample,
            num_threads=params.datasets_num_private_threads,
            datasets_use_caching=params.datasets_use_caching,
            datasets_parallel_interleave_cycle_length=(
                params.datasets_parallel_interleave_cycle_length),
            datasets_sloppy_parallel_interleave=(
                params.datasets_sloppy_parallel_interleave),
            datasets_parallel_interleave_prefetch=(
                params.datasets_parallel_interleave_prefetch))
        ds_iterator = self.create_iterator(ds)
        for d in xrange(self.num_splits):
          images[d], labels[d] = ds_iterator.get_next()

      # TODO(laigd): consider removing the --use_datasets option, it should
      # always use datasets.
      else:
        # Legacy path: read serialized records via the RecordInput op and
        # parse/preprocess each one individually.
        record_input = data_flow_ops.RecordInput(
            file_pattern=dataset.tf_record_pattern(subset),
            seed=301,
            parallelism=64,
            buffer_size=10000,
            batch_size=self.batch_size,
            shift_ratio=shift_ratio,
            name='record_input')
        records = record_input.get_yield_op()
        records = tf.split(records, self.batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        for idx in xrange(self.batch_size):
          value = records[idx]
          (image, label) = self.parse_and_preprocess(value, idx)
          # Round-robin assignment of images to splits.
          split_index = idx % self.num_splits
          labels[split_index].append(label)
          images[split_index].append(image)

      for split_index in xrange(self.num_splits):
        if not params.use_datasets:
          images[split_index] = tf.parallel_stack(images[split_index])
          labels[split_index] = tf.concat(labels[split_index], 0)
        images[split_index] = tf.reshape(
            images[split_index],
            shape=[self.batch_size_per_split, self.height, self.width,
                   self.depth])
        labels[split_index] = tf.reshape(labels[split_index],
                                         [self.batch_size_per_split])
      return images, labels

  def supports_datasets(self):
    return True
class ImagenetPreprocessor(RecordInputImagePreprocessor):
  """Preprocessor delegating to the official ResNet ImageNet preprocessing."""

  def preprocess(self, image_buffer, bbox, batch_position):
    """Decodes and preprocesses one image via the official pipeline.

    Raises:
      ImportError: if tensorflow/models is not on the PYTHONPATH.
    """
    # pylint: disable=g-import-not-at-top
    try:
      from official.r1.resnet.imagenet_preprocessing import preprocess_image
    except ImportError:
      tf.logging.fatal('Please include tensorflow/models to the PYTHONPATH.')
      raise
    # Training and evaluation use the same call; only the flag differs.
    preprocessed = preprocess_image(
        image_buffer, bbox, self.height, self.width, self.depth,
        is_training=self.train)
    return tf.cast(preprocessed, self.dtype)
class Cifar10ImagePreprocessor(BaseImagePreprocessor):
  """Preprocessor for Cifar10 input images."""

  def _distort_image(self, image):
    """Distort one image for training a network.

    Adopted the standard data augmentation scheme that is widely used for
    this dataset: the images are first zero-padded with 4 pixels on each side,
    then randomly cropped to again produce distorted images; half of the images
    are then horizontally mirrored.

    Args:
      image: input image.
    Returns:
      distorted image.
    """
    # Pad 4 pixels on every side (+8 per dimension), then random-crop back to
    # the original size.
    image = tf.image.resize_image_with_crop_or_pad(
        image, self.height + 8, self.width + 8)
    distorted_image = tf.random_crop(image,
                                     [self.height, self.width, self.depth])
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    if self.summary_verbosity >= 3:
      tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0))
    return distorted_image

  def _eval_image(self, image):
    """Get the image for model evaluation.

    Center-crops or pads the image to (self.height, self.width).
    """
    # Bug fix: resize_image_with_crop_or_pad takes (image, target_height,
    # target_width). The previous code passed width before height, which was
    # only correct because CIFAR images are square.
    distorted_image = tf.image.resize_image_with_crop_or_pad(
        image, self.height, self.width)
    if self.summary_verbosity >= 3:
      tf.summary.image('cropped.image', tf.expand_dims(distorted_image, 0))
    return distorted_image

  def preprocess(self, raw_image):
    """Preprocessing raw image: distort for training, crop/pad for eval."""
    if self.summary_verbosity >= 3:
      tf.summary.image('raw.image', tf.expand_dims(raw_image, 0))
    if self.train and self.distortions:
      image = self._distort_image(raw_image)
    else:
      image = self._eval_image(raw_image)
    normalized = normalized_image(image)
    return tf.cast(normalized, self.dtype)

  def minibatch(self,
                dataset,
                subset,
                params,
                shift_ratio=-1):
    """Builds per-split image/label tensors from in-memory CIFAR data."""
    # TODO(jsimsa): Implement datasets code path
    del shift_ratio, params
    with tf.name_scope('batch_processing'):
      all_images, all_labels = dataset.read_data_files(subset)
      all_images = tf.constant(all_images)
      all_labels = tf.constant(all_labels)
      input_image, input_label = tf.train.slice_input_producer(
          [all_images, all_labels])
      input_image = tf.cast(input_image, self.dtype)
      input_label = tf.cast(input_label, tf.int32)
      # Ensure that the random shuffling has good mixing properties.
      min_fraction_of_examples_in_queue = 0.4
      min_queue_examples = int(dataset.num_examples_per_epoch(subset) *
                               min_fraction_of_examples_in_queue)
      raw_images, raw_labels = tf.train.shuffle_batch(
          [input_image, input_label], batch_size=self.batch_size,
          capacity=min_queue_examples + 3 * self.batch_size,
          min_after_dequeue=min_queue_examples)

      images = [[] for i in range(self.num_splits)]
      labels = [[] for i in range(self.num_splits)]

      # Create a list of size batch_size, each containing one image of the
      # batch. Without the unstack call, raw_images[i] would still access the
      # same image via a strided_slice op, but would be slower.
      raw_images = tf.unstack(raw_images, axis=0)
      raw_labels = tf.unstack(raw_labels, axis=0)
      for i in xrange(self.batch_size):
        split_index = i % self.num_splits
        # The raw image read from data has the format [depth, height, width]
        # reshape to the format returned by minibatch.
        raw_image = tf.reshape(raw_images[i],
                               [dataset.depth, dataset.height, dataset.width])
        raw_image = tf.transpose(raw_image, [1, 2, 0])
        image = self.preprocess(raw_image)
        images[split_index].append(image)
        labels[split_index].append(raw_labels[i])

      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.parallel_stack(labels[split_index])
      return images, labels
class COCOPreprocessor(BaseImagePreprocessor):
  """Preprocessor for COCO dataset input images, boxes, and labels."""

  def minibatch(self,
                dataset,
                subset,
                params,
                shift_ratio=-1):
    """Builds per-split input tuples (images, boxes, classes, ...)."""
    del shift_ratio  # Not used when using datasets instead of data_flow_ops
    with tf.name_scope('batch_processing'):
      ds = self.create_dataset(
          batch_size=self.batch_size,
          num_splits=self.num_splits,
          batch_size_per_split=self.batch_size_per_split,
          dataset=dataset,
          subset=subset,
          train=self.train,
          datasets_repeat_cached_sample=params.datasets_repeat_cached_sample,
          num_threads=params.datasets_num_private_threads,
          datasets_use_caching=params.datasets_use_caching,
          datasets_parallel_interleave_cycle_length=(
              params.datasets_parallel_interleave_cycle_length),
          datasets_sloppy_parallel_interleave=(
              params.datasets_sloppy_parallel_interleave),
          datasets_parallel_interleave_prefetch=(
              params.datasets_parallel_interleave_prefetch))
      ds_iterator = self.create_iterator(ds)

      # Training data: 4 tuple
      # Validation data: 5 tuple
      # See get_input_shapes in models/ssd_model.py for details.
      input_len = 4 if subset == 'train' else 5
      input_lists = [[None for _ in range(self.num_splits)]
                     for _ in range(input_len)]
      for d in xrange(self.num_splits):
        input_list = ds_iterator.get_next()
        for i in range(input_len):
          input_lists[i][d] = input_list[i]
      return input_lists

  def preprocess(self, data):
    """Preprocesses one parsed COCO example dict into model input tensors.

    Returns a 4-tuple for training and a 5-tuple for evaluation (see
    minibatch() above).
    """
    try:
      import ssd_dataloader  # pylint: disable=g-import-not-at-top
      import ssd_constants  # pylint: disable=g-import-not-at-top
      from object_detection.core import preprocessor  # pylint: disable=g-import-not-at-top
    except ImportError:
      raise ImportError('To use the COCO dataset, you must clone the '
                        'repo https://github.com/tensorflow/models and add '
                        'tensorflow/models and tensorflow/models/research to '
                        'the PYTHONPATH, and compile the protobufs by '
                        'following https://github.com/tensorflow/models/blob/'
                        'master/research/object_detection/g3doc/installation.md'
                        '#protobuf-compilation')
    image_buffer = data['image_buffer']
    boxes = data['groundtruth_boxes']
    classes = tf.reshape(data['groundtruth_classes'], [-1, 1])
    source_id = tf.string_to_number(data['source_id'])
    raw_shape = data['raw_shape']

    ssd_encoder = ssd_dataloader.Encoder()

    # Only 80 of the 90 COCO classes are used.
    class_map = tf.convert_to_tensor(ssd_constants.CLASS_MAP)
    classes = tf.gather(class_map, classes)
    classes = tf.cast(classes, dtype=tf.float32)

    if self.train:
      image, boxes, classes = ssd_dataloader.ssd_decode_and_crop(
          image_buffer, boxes, classes, raw_shape)
      # ssd_crop resizes and returns image of dtype float32 and does not change
      # its range (i.e., value in between 0--255). Divide by 255. converts it
      # to [0, 1] range. Not doing this before cropping to avoid dtype cast
      # (which incurs additional memory copy).
      image /= 255.

      image, boxes = preprocessor.random_horizontal_flip(
          image=image, boxes=boxes)
      # Random horizontal flip probability is 50%
      # See https://github.com/tensorflow/models/blob/master/research/object_detection/core/preprocessor.py  # pylint: disable=line-too-long
      mlperf.logger.log(key=mlperf.tags.RANDOM_FLIP_PROBABILITY, value=0.5)
      image = tf.cast(image, self.dtype)

      encoded_returns = ssd_encoder.encode_labels(boxes, classes)
      encoded_classes, encoded_boxes, num_matched_boxes = encoded_returns

      # Shape of image: [width, height, channel]
      # Shape of encoded_boxes: [NUM_SSD_BOXES, 4]
      # Shape of encoded_classes: [NUM_SSD_BOXES, 1]
      # Shape of num_matched_boxes: [1]
      return (image, encoded_boxes, encoded_classes, num_matched_boxes)

    else:
      image = tf.image.decode_jpeg(image_buffer)
      image = tf.image.resize_images(
          image, size=(ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE))
      # resize_image returns image of dtype float32 and does not change its
      # range. Divide by 255 to convert image to [0, 1] range.
      image /= 255.

      image = ssd_dataloader.normalize_image(image)
      image = tf.cast(image, self.dtype)

      def trim_and_pad(inp_tensor):
        """Limit the number of boxes, and pad if necessary."""
        inp_tensor = inp_tensor[:ssd_constants.MAX_NUM_EVAL_BOXES]
        num_pad = ssd_constants.MAX_NUM_EVAL_BOXES - tf.shape(inp_tensor)[0]
        inp_tensor = tf.pad(inp_tensor, [[0, num_pad], [0, 0]])
        return tf.reshape(inp_tensor, [ssd_constants.MAX_NUM_EVAL_BOXES,
                                       inp_tensor.get_shape()[1]])

      boxes, classes = trim_and_pad(boxes), trim_and_pad(classes)

      # Shape of boxes: [MAX_NUM_EVAL_BOXES, 4]
      # Shape of classes: [MAX_NUM_EVAL_BOXES, 1]
      # Shape of source_id: [] (scalar tensor)
      # Shape of raw_shape: [3]
      return (image, boxes, classes, source_id, raw_shape)

  def create_dataset(self,
                     batch_size,
                     num_splits,
                     batch_size_per_split,
                     dataset,
                     subset,
                     train,
                     datasets_repeat_cached_sample,
                     num_threads=None,
                     datasets_use_caching=False,
                     datasets_parallel_interleave_cycle_length=None,
                     datasets_sloppy_parallel_interleave=False,
                     datasets_parallel_interleave_prefetch=None):
    """Creates a dataset for the benchmark."""
    try:
      import ssd_dataloader  # pylint: disable=g-import-not-at-top
    except ImportError:
      raise ImportError('To use the COCO dataset, you must clone the '
                        'repo https://github.com/tensorflow/models and add '
                        'tensorflow/models and tensorflow/models/research to '
                        'the PYTHONPATH, and compile the protobufs by '
                        'following https://github.com/tensorflow/models/blob/'
                        'master/research/object_detection/g3doc/installation.md'
                        '#protobuf-compilation')
    assert self.supports_datasets()
    glob_pattern = dataset.tf_record_pattern(subset)
    ds = tf.data.TFRecordDataset.list_files(glob_pattern, shuffle=train)
    # TODO(haoyuzhang): Enable map+filter fusion after cl/218399112 in release
    # options = tf.data.Options()
    # options.experimental_optimization = tf.data.experimental.OptimizationOptions()  # pylint: disable=line-too-long
    # options.experimental_optimization.map_and_filter_fusion = True
    # ds = ds.with_options(options)
    ds = ds.apply(
        tf.data.experimental.parallel_interleave(
            tf.data.TFRecordDataset,
            cycle_length=datasets_parallel_interleave_cycle_length or 10,
            sloppy=datasets_sloppy_parallel_interleave))
    mlperf.logger.log(key=mlperf.tags.INPUT_ORDER)
    if datasets_repeat_cached_sample:
      # Repeat a single sample element indefinitely to emulate memory-speed IO.
      ds = ds.take(1).cache().repeat()
    ds = ds.prefetch(buffer_size=batch_size)
    if datasets_use_caching:
      ds = ds.cache()
    if train:
      ds = ds.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=10000))
      mlperf.logger.log(key=mlperf.tags.INPUT_SHARD, value=10000)
      mlperf.logger.log(key=mlperf.tags.INPUT_ORDER)
    else:
      ds = ds.repeat()
    ds = ds.map(ssd_dataloader.ssd_parse_example_proto, num_parallel_calls=64)
    # Drop examples with no groundtruth boxes; the SSD encoder cannot handle
    # empty box sets.
    ds = ds.filter(
        lambda data: tf.greater(tf.shape(data['groundtruth_boxes'])[0], 0))
    ds = ds.apply(
        tf.data.experimental.map_and_batch(
            map_func=self.preprocess,
            batch_size=batch_size_per_split,
            num_parallel_batches=num_splits,
            drop_remainder=train))
    ds = ds.prefetch(buffer_size=num_splits)
    if num_threads:
      # Run the input pipeline on a private threadpool of the requested size.
      options = tf.data.Options()
      options.experimental_threading.private_threadpool_size = num_threads
      ds = ds.with_options(options)
    return ds

  def supports_datasets(self):
    return True
class TestImagePreprocessor(BaseImagePreprocessor):
  """Preprocessor used for testing.

  set_fake_data() sets which images and labels will be output by minibatch(),
  and must be called before minibatch(). This allows tests to easily specify
  a set of images to use for training, without having to create any files.

  Queue runners must be started for this preprocessor to work.
  """

  def __init__(self,
               batch_size,
               output_shapes,
               num_splits,
               dtype,
               train=None,
               distortions=None,
               resize_method=None,
               shift_ratio=0,
               summary_verbosity=0,
               distort_color_in_yiq=False,
               fuse_decode_and_crop=False,
               match_mlperf=False):
    super(TestImagePreprocessor, self).__init__(
        batch_size, output_shapes, num_splits, dtype, train, distortions,
        resize_method, shift_ratio, summary_verbosity=summary_verbosity,
        distort_color_in_yiq=distort_color_in_yiq,
        fuse_decode_and_crop=fuse_decode_and_crop, match_mlperf=match_mlperf)
    # When set, minibatch() asserts it is called with this subset.
    self.expected_subset = None

  def set_fake_data(self, fake_images, fake_labels):
    """Stores the numpy arrays minibatch() will serve.

    fake_images must be rank-4 (N, H, W, C) and fake_labels rank-1 (N,),
    with N a multiple of the batch size.
    """
    assert len(fake_images.shape) == 4
    assert len(fake_labels.shape) == 1
    num_images = fake_images.shape[0]
    assert num_images == fake_labels.shape[0]
    assert num_images % self.batch_size == 0
    self.fake_images = fake_images
    self.fake_labels = fake_labels

  def minibatch(self,
                dataset,
                subset,
                params,
                shift_ratio=0):
    """Get test image batches."""
    del dataset, params
    if (not hasattr(self, 'fake_images') or
        not hasattr(self, 'fake_labels')):
      raise ValueError('Must call set_fake_data() before calling minibatch '
                       'on TestImagePreprocessor')
    if self.expected_subset is not None:
      assert subset == self.expected_subset

    shift_ratio = shift_ratio or self.shift_ratio
    # Rotate the fake data so different (simulated) workers see different
    # batches first.
    fake_images = cnn_util.roll_numpy_batches(self.fake_images, self.batch_size,
                                              shift_ratio)
    fake_labels = cnn_util.roll_numpy_batches(self.fake_labels, self.batch_size,
                                              shift_ratio)

    with tf.name_scope('batch_processing'):
      image_slice, label_slice = tf.train.slice_input_producer(
          [fake_images, fake_labels],
          shuffle=False,
          name='image_slice')
      raw_images, raw_labels = tf.train.batch(
          [image_slice, label_slice], batch_size=self.batch_size,
          name='image_batch')
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      for i in xrange(self.batch_size):
        # Round-robin assignment of examples to splits.
        split_index = i % self.num_splits
        raw_image = tf.cast(raw_images[i], self.dtype)
        images[split_index].append(raw_image)
        labels[split_index].append(raw_labels[i])
      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.parallel_stack(labels[split_index])

      normalized = [normalized_image(part) for part in images]
      return [[tf.cast(part, self.dtype) for part in normalized], labels]
class LibrispeechPreprocessor(InputPreprocessor):
  """Preprocessor for librispeech class for all image model preprocessors."""

  def __init__(self, batch_size, output_shapes, num_splits, dtype, train,
               **kwargs):
    """Initializes the preprocessor.

    Raises:
      ValueError: if batch_size is not a multiple of num_splits.
    """
    del kwargs
    super(LibrispeechPreprocessor, self).__init__(batch_size, output_shapes)
    self.num_splits = num_splits
    self.dtype = dtype
    self.is_train = train
    if self.batch_size % self.num_splits != 0:
      raise ValueError(('batch_size must be a multiple of num_splits: '
                        'batch_size %d, num_splits: %d') % (self.batch_size,
                                                            self.num_splits))
    self.batch_size_per_split = self.batch_size // self.num_splits

  def create_dataset(self,
                     batch_size,
                     num_splits,
                     batch_size_per_split,
                     dataset,
                     subset,
                     train,
                     datasets_repeat_cached_sample,
                     num_threads=None,
                     datasets_use_caching=False,
                     datasets_parallel_interleave_cycle_length=None,
                     datasets_sloppy_parallel_interleave=False,
                     datasets_parallel_interleave_prefetch=None):
    """Creates a dataset for the benchmark."""
    # TODO(laigd): currently the only difference between this and the one in
    # BaseImagePreprocessor is, this uses map() and padded_batch() while the
    # latter uses tf.data.experimental.map_and_batch(). Try to merge them.
    assert self.supports_datasets()
    glob_pattern = dataset.tf_record_pattern(subset)
    file_names = gfile.Glob(glob_pattern)
    if not file_names:
      raise ValueError('Found no files in --data_dir matching: {}'
                       .format(glob_pattern))
    ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=train)
    ds = ds.apply(
        tf.data.experimental.parallel_interleave(
            tf.data.TFRecordDataset,
            cycle_length=datasets_parallel_interleave_cycle_length or 10,
            sloppy=datasets_sloppy_parallel_interleave,
            prefetch_input_elements=datasets_parallel_interleave_prefetch))
    if datasets_repeat_cached_sample:
      # Repeat a single sample element indefinitely to emulate memory-speed IO.
      ds = ds.take(1).cache().repeat()
    counter = tf.data.Dataset.range(batch_size)
    counter = counter.repeat()
    ds = tf.data.Dataset.zip((ds, counter))
    ds = ds.prefetch(buffer_size=batch_size)
    if datasets_use_caching:
      ds = ds.cache()
    if train:
      ds = ds.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=10000))
    else:
      ds = ds.repeat()
    ds = ds.map(map_func=self.parse_and_preprocess,
                num_parallel_calls=batch_size_per_split*num_splits)
    # Variable-length utterances/labels are padded to the static shapes
    # declared in self.output_shapes (batch dim stripped).
    ds = ds.padded_batch(
        batch_size=batch_size_per_split,
        padded_shapes=tuple([
            tf.TensorShape(output_shape[1:])
            for output_shape in self.output_shapes
        ]),
        drop_remainder=True)
    ds = ds.prefetch(buffer_size=num_splits)
    if num_threads:
      # Run the input pipeline on a private threadpool of the requested size.
      options = tf.data.Options()
      options.experimental_threading.private_threadpool_size = num_threads
      ds = ds.with_options(options)
    return ds

  def minibatch(self, dataset, subset, params, shift_ratio=-1):
    """Builds per-split input tuples (features, labels, lengths)."""
    assert params.use_datasets
    # TODO(laigd): unify this with CNNModel's minibatch()
    # TODO(laigd): in distributed mode we use shift_ratio so different workers
    # won't work on same inputs, so we should respect that.
    del shift_ratio
    with tf.name_scope('batch_processing'):
      ds = self.create_dataset(
          self.batch_size,
          self.num_splits,
          self.batch_size_per_split,
          dataset,
          subset,
          self.is_train,
          datasets_repeat_cached_sample=params.datasets_repeat_cached_sample,
          num_threads=params.datasets_num_private_threads,
          datasets_use_caching=params.datasets_use_caching,
          datasets_parallel_interleave_cycle_length=(
              params.datasets_parallel_interleave_cycle_length),
          datasets_sloppy_parallel_interleave=(
              params.datasets_sloppy_parallel_interleave),
          datasets_parallel_interleave_prefetch=(
              params.datasets_parallel_interleave_prefetch))
      ds_iterator = self.create_iterator(ds)

      # The four lists are: input spectrogram feature, labels, input lengths,
      # label lengths
      input_lists = [[None for _ in range(self.num_splits)] for _ in range(4)]
      for d in xrange(self.num_splits):
        input_list = ds_iterator.get_next()
        for i in range(4):
          input_lists[i][d] = input_list[i]

      assert self.output_shapes == [
          input_lists[i][0].shape.as_list() for i in range(4)
      ]
      return tuple(input_lists)

  def supports_datasets(self):
    return True

  def parse_and_preprocess(self, value, batch_position):
    """Parse an TFRecord.

    Returns a 4-element list: [features, labels, input_length, label_length],
    matching the order documented in minibatch() above.
    """
    del batch_position
    assert self.supports_datasets()
    context_features = {
        'labels': tf.VarLenFeature(dtype=tf.int64),
        'input_length': tf.FixedLenFeature([], dtype=tf.int64),
        'label_length': tf.FixedLenFeature([], dtype=tf.int64),
    }
    sequence_features = {
        # 161 spectrogram bins per frame — presumably fixed by the dataset
        # generation pipeline; confirm against the record writer.
        'features': tf.FixedLenSequenceFeature([161], dtype=tf.float32)
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=value,
        context_features=context_features,
        sequence_features=sequence_features,
    )

    return [
        # Input
        tf.expand_dims(sequence_parsed['features'], axis=2),
        # Label
        tf.cast(
            tf.reshape(
                tf.sparse_tensor_to_dense(context_parsed['labels']), [-1]),
            dtype=tf.int32),
        # Input length
        tf.cast(
            tf.reshape(context_parsed['input_length'], [1]),
            dtype=tf.int32),
        # Label length
        tf.cast(
            tf.reshape(context_parsed['label_length'], [1]),
            dtype=tf.int32),
    ]
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtai... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\... | ```python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `indicator_column`. `indicator_column` is recommended
for features with only a few possible values. For features with many
possible values, to reduce the size of your model, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosophy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building a model using FeatureColumns; this can be used in a
`model_fn` which is given to the `tf.estimator.Estimator`:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import re
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
# TODO(b/118385027): Dependency on keras can be problematic if Keras moves out
# of the main repo.
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
# Cutoff date for deprecation warnings on the old (underscored) _FeatureColumn
# APIs; None means the warnings carry no removal date. Presumably consumed by
# the `deprecation` decorators applied elsewhere in this file — usage is
# outside this chunk.
_FEATURE_COLUMN_DEPRECATION_DATE = None
# Instruction text attached to those deprecation warnings.
_FEATURE_COLUMN_DEPRECATION = ('The old _FeatureColumn APIs are being '
                               'deprecated. Please use the new FeatureColumn '
                               'APIs instead.')
class StateManager(object):
  """Manages the state associated with FeatureColumns.

  Some `FeatureColumn`s create variables or resources to assist their
  computation. The `StateManager` is responsible for creating and storing these
  objects since `FeatureColumn`s are supposed to be stateless configuration
  only.

  This base class only defines the interface: every method raises
  `NotImplementedError` and must be overridden by a concrete subclass.
  """

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    """Creates a new variable.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      trainable: Whether this variable is trainable or not.
      use_resource: If true, we use resource variables. Otherwise we use
        RefVariable.
      initializer: initializer instance (callable).

    Returns:
      The created variable.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, name, shape, dtype, trainable, use_resource, initializer
    raise NotImplementedError('StateManager.create_variable')

  def add_variable(self, feature_column, var):
    """Adds an existing variable to the state.

    Args:
      feature_column: A `FeatureColumn` object to associate this variable with.
      var: The variable.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, var
    raise NotImplementedError('StateManager.add_variable')

  def get_variable(self, feature_column, name):
    """Returns an existing variable.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: variable name.

    Returns:
      The variable previously created for (`feature_column`, `name`).

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, name
    # Fixed: the message previously said 'StateManager.get_var', which does not
    # match this method's name and made tracebacks misleading.
    raise NotImplementedError('StateManager.get_variable')

  def add_resource(self, feature_column, name, resource):
    """Creates a new resource.

    Resources can be things such as tables, variables, trackables, etc.

    Args:
      feature_column: A `FeatureColumn` object this resource corresponds to.
      name: Name of the resource.
      resource: The resource.

    Returns:
      The created resource.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, name, resource
    raise NotImplementedError('StateManager.add_resource')

  def has_resource(self, feature_column, name):
    """Returns true iff a resource with same name exists.

    Resources can be things such as tables, variables, trackables, etc.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: Name of the resource.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, name
    raise NotImplementedError('StateManager.has_resource')

  def get_resource(self, feature_column, name):
    """Returns an already created resource.

    Resources can be things such as tables, variables, trackables, etc.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: Name of the resource.

    Returns:
      The resource previously added for (`feature_column`, `name`).

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, name
    raise NotImplementedError('StateManager.get_resource')
class _StateManagerImpl(StateManager):
  """Manages the state of DenseFeatures and LinearLayer."""

  def __init__(self, layer, trainable):
    """Creates an _StateManagerImpl object.

    Args:
      layer: The input layer this state manager is associated with.
      trainable: Whether by default, variables created are trainable or not.
    """
    self._trainable = trainable
    self._layer = layer
    # Lazily attach a trackable mapping for resources so they are serialized
    # with the layer; only create it if the layer doesn't already have one.
    if self._layer is not None and not hasattr(self._layer, '_resources'):
      self._layer._resources = data_structures.Mapping()  # pylint: disable=protected-access
    # Both maps key first by feature column, then by name, since `name` is
    # only unique within a single feature column.
    self._cols_to_vars_map = collections.defaultdict(lambda: {})
    self._cols_to_resources_map = collections.defaultdict(lambda: {})

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    """Creates, tracks, and returns a variable owned by `feature_column`.

    Raises:
      ValueError: if a variable was already created for this
        (`feature_column`, `name`) pair.
    """
    if name in self._cols_to_vars_map[feature_column]:
      raise ValueError('Variable already exists.')

    # We explicitly track these variables since `name` is not guaranteed to be
    # unique and disable manual tracking that the add_weight call does.
    with trackable.no_manual_dependency_tracking_scope(self._layer):
      var = self._layer.add_weight(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          trainable=self._trainable and trainable,
          use_resource=use_resource,
          # TODO(rohanj): Get rid of this hack once we have a mechanism for
          # specifying a default partitioner for an entire layer. In that case,
          # the default getter for Layers should work.
          getter=variable_scope.get_variable)
    if isinstance(var, variables.PartitionedVariable):
      # Track each partition separately, disambiguating by its slice offset.
      for v in var:
        part_name = name + '/' + str(v._get_save_slice_info().var_offset[0])  # pylint: disable=protected-access
        self._layer._track_trackable(v, feature_column.name + '/' + part_name)  # pylint: disable=protected-access
    else:
      if isinstance(var, trackable.Trackable):
        self._layer._track_trackable(var, feature_column.name + '/' + name)  # pylint: disable=protected-access

    self._cols_to_vars_map[feature_column][name] = var
    return var

  def get_variable(self, feature_column, name):
    """Returns the variable created for (`feature_column`, `name`).

    Raises:
      ValueError: if no such variable was created by this manager.
    """
    if name in self._cols_to_vars_map[feature_column]:
      return self._cols_to_vars_map[feature_column][name]
    raise ValueError('Variable does not exist.')

  def add_resource(self, feature_column, resource_name, resource):
    """Stores `resource` under (`feature_column`, `resource_name`)."""
    self._cols_to_resources_map[feature_column][resource_name] = resource
    # pylint: disable=protected-access
    if self._layer is not None and isinstance(resource, trackable.Trackable):
      # Add trackable resources to the layer for serialization.
      if feature_column.name not in self._layer._resources:
        self._layer._resources[feature_column.name] = data_structures.Mapping()
      if resource_name not in self._layer._resources[feature_column.name]:
        self._layer._resources[feature_column.name][resource_name] = resource
    # pylint: enable=protected-access

  def has_resource(self, feature_column, resource_name):
    """Returns True iff a resource was added for this column and name."""
    return resource_name in self._cols_to_resources_map[feature_column]

  def get_resource(self, feature_column, resource_name):
    """Returns the resource stored for (`feature_column`, `resource_name`).

    Raises:
      ValueError: if no such resource was added to this manager.
    """
    if (feature_column not in self._cols_to_resources_map or
        resource_name not in self._cols_to_resources_map[feature_column]):
      raise ValueError('Resource does not exist.')
    return self._cols_to_resources_map[feature_column][resource_name]
class _StateManagerImplV2(_StateManagerImpl):
  """Manages the state of DenseFeatures."""

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    """Creates, tracks, and returns a variable owned by `feature_column`.

    Unlike `_StateManagerImpl.create_variable`, this does not pass
    `variable_scope.get_variable` as the getter and has no special handling
    for partitioned variables.

    Raises:
      ValueError: if a variable was already created for this
        (`feature_column`, `name`) pair.
    """
    if name in self._cols_to_vars_map[feature_column]:
      raise ValueError('Variable already exists.')

    # We explicitly track these variables since `name` is not guaranteed to be
    # unique and disable manual tracking that the add_weight call does.
    with trackable.no_manual_dependency_tracking_scope(self._layer):
      var = self._layer.add_weight(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          trainable=self._trainable and trainable,
          use_resource=use_resource)
    if isinstance(var, trackable.Trackable):
      self._layer._track_trackable(var, feature_column.name + '/' + name)  # pylint: disable=protected-access
    self._cols_to_vars_map[feature_column][name] = var
    return var
class _BaseFeaturesLayer(Layer):
  """Base class for DenseFeatures and SequenceFeatures.

  Defines common methods and helpers.

  Args:
    feature_columns: An iterable containing the FeatureColumns to use as
      inputs to your model.
    expected_column_type: Expected class for provided feature columns.
    trainable: Boolean, whether the layer's variables will be updated via
      gradient descent during training.
    name: Name to give to the DenseFeatures.
    **kwargs: Keyword arguments to construct a layer.

  Raises:
    ValueError: if an item in `feature_columns` doesn't match
      `expected_column_type`.
  """

  def __init__(self,
               feature_columns,
               expected_column_type,
               trainable,
               name,
               partitioner=None,
               **kwargs):
    super(_BaseFeaturesLayer, self).__init__(
        name=name, trainable=trainable, **kwargs)
    self._feature_columns = _normalize_feature_columns(feature_columns)
    self._state_manager = _StateManagerImpl(self, self.trainable)
    self._partitioner = partitioner
    for column in self._feature_columns:
      if not isinstance(column, expected_column_type):
        raise ValueError(
            'Items of feature_columns must be a {}. '
            'You can wrap a categorical column with an '
            'embedding_column or indicator_column. Given: {}'.format(
                expected_column_type, column))

  def build(self, _):
    # Open a (pure) variable scope per column so partitioning information
    # percolates down into each column's state.
    for column in self._feature_columns:
      with variable_scope._pure_variable_scope(  # pylint: disable=protected-access
          self.name,
          partitioner=self._partitioner):
        with variable_scope._pure_variable_scope(  # pylint: disable=protected-access
            _sanitize_column_name_for_variable_scope(column.name)):
          column.create_state(self._state_manager)
    super(_BaseFeaturesLayer, self).build(None)

  def _target_shape(self, input_shape, num_elements):
    """Computes expected output shape of the layer or a column's dense tensor.

    Fixed: this abstract method was previously declared as `_output_shape`,
    but both call sites in this class (`compute_output_shape` and
    `_process_dense_tensor`) invoke `self._target_shape`, which would raise
    AttributeError; the definition is renamed to match its callers.

    Args:
      input_shape: Tensor or array with batch shape.
      num_elements: Size of the last dimension of the output.

    Returns:
      Tuple with output shape.
    """
    raise NotImplementedError('Calling an abstract method.')

  def compute_output_shape(self, input_shape):
    """Returns the output shape: batch shape plus total dense elements."""
    total_elements = 0
    for column in self._feature_columns:
      total_elements += column.variable_shape.num_elements()
    return self._target_shape(input_shape, total_elements)

  def _process_dense_tensor(self, column, tensor):
    """Reshapes the dense tensor output of a column based on expected shape.

    Args:
      column: A DenseColumn or SequenceDenseColumn object.
      tensor: A dense tensor obtained from the same column.

    Returns:
      Reshaped dense tensor.
    """
    num_elements = column.variable_shape.num_elements()
    target_shape = self._target_shape(array_ops.shape(tensor), num_elements)
    return array_ops.reshape(tensor, shape=target_shape)

  def _verify_and_concat_tensors(self, output_tensors):
    """Verifies and concatenates the dense output of several columns."""
    _verify_static_batch_size_equality(output_tensors, self._feature_columns)
    return array_ops.concat(output_tensors, -1)

  def get_config(self):
    """Returns a serializable config including columns and partitioner."""
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    column_configs = serialization.serialize_feature_columns(
        self._feature_columns)
    config = {'feature_columns': column_configs}
    config['partitioner'] = generic_utils.serialize_keras_object(
        self._partitioner)

    base_config = super(  # pylint: disable=bad-super-call
        _BaseFeaturesLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Reconstructs a layer from the config produced by `get_config`."""
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    config_cp = config.copy()
    config_cp['feature_columns'] = serialization.deserialize_feature_columns(
        config['feature_columns'], custom_objects=custom_objects)
    config_cp['partitioner'] = generic_utils.deserialize_keras_object(
        config['partitioner'], custom_objects)

    return cls(**config_cp)
class _LinearModelLayer(Layer):
  """Layer that contains logic for `LinearModel`."""

  def __init__(self,
               feature_columns,
               units=1,
               sparse_combiner='sum',
               trainable=True,
               name=None,
               **kwargs):
    """Creates a _LinearModelLayer.

    Args:
      feature_columns: An iterable of `DenseColumn`s and/or
        `CategoricalColumn`s used as model inputs.
      units: Dimensionality of the output (number of weight columns).
      sparse_combiner: How multivalent categorical columns are reduced
        ('sum', 'mean' or 'sqrtn').
      trainable: Whether the created weights/bias are trainable.
      name: Layer name.
      **kwargs: Keyword arguments forwarded to the base `Layer`.

    Raises:
      ValueError: if an item of `feature_columns` is neither a `DenseColumn`
        nor a `CategoricalColumn`.
    """
    super(_LinearModelLayer, self).__init__(
        name=name, trainable=trainable, **kwargs)

    self._feature_columns = _normalize_feature_columns(feature_columns)
    for column in self._feature_columns:
      if not isinstance(column, (DenseColumn, CategoricalColumn)):
        raise ValueError(
            'Items of feature_columns must be either a '
            'DenseColumn or CategoricalColumn. Given: {}'.format(column))

    self._units = units
    self._sparse_combiner = sparse_combiner

    self._state_manager = _StateManagerImpl(self, self.trainable)
    # The bias variable is created lazily in build().
    self.bias = None

  def build(self, _):
    """Creates per-column weights and the shared bias variable."""
    # We need variable scopes for now because we want the variable partitioning
    # information to percolate down. We also use _pure_variable_scope's here
    # since we want to open up a name_scope in the `call` method while creating
    # the ops.
    with variable_scope._pure_variable_scope(self.name):  # pylint: disable=protected-access
      for column in self._feature_columns:
        with variable_scope._pure_variable_scope(  # pylint: disable=protected-access
            _sanitize_column_name_for_variable_scope(column.name)):
          # Create the state for each feature column
          column.create_state(self._state_manager)

          # Create a weight variable for each column.
          if isinstance(column, CategoricalColumn):
            first_dim = column.num_buckets
          else:
            first_dim = column.variable_shape.num_elements()
          self._state_manager.create_variable(
              column,
              name='weights',
              dtype=dtypes.float32,
              shape=(first_dim, self._units),
              initializer=initializers.zeros(),
              trainable=self.trainable)

      # Create a bias variable.
      self.bias = self.add_variable(
          name='bias_weights',
          dtype=dtypes.float32,
          shape=[self._units],
          initializer=initializers.zeros(),
          trainable=self.trainable,
          use_resource=True,
          # TODO(rohanj): Get rid of this hack once we have a mechanism for
          # specifying a default partitioner for an entire layer. In that case,
          # the default getter for Layers should work.
          getter=variable_scope.get_variable)

    super(_LinearModelLayer, self).build(None)

  def call(self, features):
    """Returns the weighted sum (plus bias) over all feature columns.

    Args:
      features: A dict mapping feature keys to `Tensor`/`SparseTensor`s.

    Raises:
      ValueError: if `features` is not a dict.
    """
    if not isinstance(features, dict):
      raise ValueError('We expected a dictionary here. Instead we got: {}'
                       .format(features))
    with ops.name_scope(self.name):
      transformation_cache = FeatureTransformationCache(features)
      weighted_sums = []
      for column in self._feature_columns:
        with ops.name_scope(
            _sanitize_column_name_for_variable_scope(column.name)):
          # All the weights used in the linear model are owned by the state
          # manager associated with this Linear Model.
          weight_var = self._state_manager.get_variable(column, 'weights')

          weighted_sum = _create_weighted_sum(
              column=column,
              transformation_cache=transformation_cache,
              state_manager=self._state_manager,
              sparse_combiner=self._sparse_combiner,
              weight_var=weight_var)
          weighted_sums.append(weighted_sum)

      _verify_static_batch_size_equality(weighted_sums, self._feature_columns)
      predictions_no_bias = math_ops.add_n(
          weighted_sums, name='weighted_sum_no_bias')
      predictions = nn_ops.bias_add(
          predictions_no_bias, self.bias, name='weighted_sum')
      return predictions

  def get_config(self):
    """Returns a serializable config including columns and hyperparameters."""
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    column_configs = serialization.serialize_feature_columns(
        self._feature_columns)
    config = {
        'feature_columns': column_configs,
        'units': self._units,
        'sparse_combiner': self._sparse_combiner
    }

    base_config = super(  # pylint: disable=bad-super-call
        _LinearModelLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Reconstructs a layer from the config produced by `get_config`."""
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    config_cp = config.copy()
    columns = serialization.deserialize_feature_columns(
        config_cp['feature_columns'], custom_objects=custom_objects)

    del config_cp['feature_columns']
    return cls(feature_columns=columns, **config_cp)
# TODO(tanzheny): Cleanup it with respect to Premade model b/132690565.
class LinearModel(keras_training.Model):
  """Produces a linear prediction `Tensor` based on given `feature_columns`.

  This layer generates a weighted sum based on output dimension `units`.
  Weighted sum refers to logits in classification problems. It refers to the
  prediction itself for linear regression problems.

  Note on supported columns: `LinearModel` treats categorical columns as
  `indicator_column`s. To be specific, assume the input as `SparseTensor` looks
  like:

  ```python
    shape = [2, 2]
    {
        [0, 0]: "a"
        [1, 0]: "b"
        [1, 1]: "c"
    }
  ```
  `linear_model` assigns weights for the presence of "a", "b", "c" implicitly,
  just like `indicator_column`, while `input_layer` explicitly requires wrapping
  each of categorical columns with an `embedding_column` or an
  `indicator_column`.

  Example of usage:

  ```python
  price = numeric_column('price')
  price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
  keywords = categorical_column_with_hash_bucket("keywords", 10K)
  keywords_price = crossed_column('keywords', price_buckets, ...)
  columns = [price_buckets, keywords, keywords_price ...]
  linear_model = LinearModel(columns)

  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  prediction = linear_model(features)
  ```
  """

  def __init__(self,
               feature_columns,
               units=1,
               sparse_combiner='sum',
               trainable=True,
               name=None,
               **kwargs):
    """Constructs a LinearModel.

    Args:
      feature_columns: An iterable containing the FeatureColumns to use as
        inputs to your model. All items should be instances of classes derived
        from `_FeatureColumn`s.
      units: An integer, dimensionality of the output space. Default value is 1.
      sparse_combiner: A string specifying how to reduce if a categorical column
        is multivalent. Except `numeric_column`, almost all columns passed to
        `linear_model` are considered as categorical columns.  It combines each
        categorical column independently. Currently "mean", "sqrtn" and "sum"
        are supported, with "sum" the default for linear model. "sqrtn" often
        achieves good accuracy, in particular with bag-of-words columns.
          * "sum": do not normalize features in the column
          * "mean": do l1 normalization on features in the column
          * "sqrtn": do l2 normalization on features in the column
        For example, for two features represented as the categorical columns:

        ```python
          # Feature 1

          shape = [2, 2]
          {
              [0, 0]: "a"
              [0, 1]: "b"
              [1, 0]: "c"
          }

          # Feature 2

          shape = [2, 3]
          {
              [0, 0]: "d"
              [1, 0]: "e"
              [1, 1]: "f"
              [1, 2]: "g"
          }
        ```

        with `sparse_combiner` as "mean", the linear model outputs conceptually
        are:

        ```
          y_0 = 1.0 / 2.0 * ( w_a + w_b ) + w_c + b_0
          y_1 = w_d + 1.0 / 3.0 * ( w_e + w_f + w_g ) + b_1
        ```

        where `y_i` is the output, `b_i` is the bias, and `w_x` is the weight
        assigned to the presence of `x` in the input features.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: Name to give to the Linear Model. All variables and ops created will
        be scoped by this name.
      **kwargs: Keyword arguments to construct a layer.

    Raises:
      ValueError: if an item in `feature_columns` is neither a `DenseColumn`
        nor `CategoricalColumn`.
    """
    super(LinearModel, self).__init__(name=name, **kwargs)
    # All the real work is delegated to a single inner layer.
    self.layer = _LinearModelLayer(
        feature_columns,
        units,
        sparse_combiner,
        trainable,
        name=self.name,
        **kwargs)

  def call(self, features):
    """Returns a `Tensor` that represents the predictions of a linear model.

    Args:
      features: A mapping from key to tensors. `_FeatureColumn`s look up via
        these keys. For example `numeric_column('price')` will look at 'price'
        key in this dict. Values are `Tensor` or `SparseTensor` depending on
        corresponding `_FeatureColumn`.

    Returns:
      A `Tensor` which represents predictions/logits of a linear model. Its
      shape is (batch_size, units) and its dtype is `float32`.

    Raises:
      ValueError: If features are not a dictionary.
    """
    return self.layer(features)

  @property
  def bias(self):
    """The bias variable created by the underlying `_LinearModelLayer`."""
    return self.layer.bias
def _transform_features_v2(features, feature_columns, state_manager):
  """Applies every feature column's transformation to `features`.

  Most users will not need this directly; check `input_layer` and
  `linear_model` first to see whether they satisfy your use case. This is
  useful when you want the intermediate transformed tensors themselves.

  Example:

  ```python
  # Define features and transformations
  crosses_a_x_b = crossed_column(
      columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
  price_buckets = bucketized_column(
      source_column=numeric_column("price"), boundaries=[...])

  columns = [crosses_a_x_b, price_buckets]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  transformed = transform_features(features=features, feature_columns=columns)

  assertCountEqual(columns, transformed.keys())
  ```

  Args:
    features: A mapping from key to tensors. `FeatureColumn`s look up via these
      keys. For example `numeric_column('price')` will look at 'price' key in
      this dict. Values can be a `SparseTensor` or a `Tensor` depends on
      corresponding `FeatureColumn`.
    feature_columns: An iterable containing all the `FeatureColumn`s.
    state_manager: A StateManager object that holds the FeatureColumn state.

  Returns:
    A `dict` mapping `FeatureColumn` to `Tensor` and `SparseTensor` values.
  """
  columns = _normalize_feature_columns(feature_columns)
  transformed = {}
  with ops.name_scope(
      None, default_name='transform_features', values=features.values()):
    cache = FeatureTransformationCache(features)
    for fc in columns:
      # Each column's transformation gets its own (sanitized) name scope.
      scope_name = _sanitize_column_name_for_variable_scope(fc.name)
      with ops.name_scope(None, default_name=scope_name):
        transformed[fc] = cache.get(fc, state_manager)
  return transformed
@tf_export('feature_column.make_parse_example_spec', v1=[])
def make_parse_example_spec_v2(feature_columns):
  """Creates parsing spec dictionary from input feature_columns.

  The returned dictionary can be used as arg 'features' in
  `tf.io.parse_example`.

  Typical usage example:

  ```python
  # Define features and transformations
  feature_a = tf.feature_column.categorical_column_with_vocabulary_file(...)
  feature_b = tf.feature_column.numeric_column(...)
  feature_c_bucketized = tf.feature_column.bucketized_column(
      tf.feature_column.numeric_column("feature_c"), ...)
  feature_a_x_feature_c = tf.feature_column.crossed_column(
      columns=["feature_a", feature_c_bucketized], ...)

  feature_columns = set(
      [feature_b, feature_c_bucketized, feature_a_x_feature_c])
  features = tf.io.parse_example(
      serialized=serialized_examples,
      features=tf.feature_column.make_parse_example_spec(feature_columns))
  ```

  For the above example, make_parse_example_spec would return the dict:

  ```python
  {
      "feature_a": parsing_ops.VarLenFeature(tf.string),
      "feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
      "feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
  }
  ```

  Args:
    feature_columns: An iterable containing all feature columns. All items
      should be instances of classes derived from `FeatureColumn`.

  Returns:
    A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
    value.

  Raises:
    ValueError: If any of the given `feature_columns` is not a `FeatureColumn`
      instance.
  """
  parsing_spec = {}
  for column in feature_columns:
    if not isinstance(column, FeatureColumn):
      raise ValueError('All feature_columns must be FeatureColumn instances. '
                       'Given: {}'.format(column))
    column_spec = column.parse_example_spec
    # Several columns may legitimately share a key (e.g. a bucketized column
    # and the numeric column it wraps) as long as their specs agree.
    for key, value in six.iteritems(column_spec):
      if key in parsing_spec and value != parsing_spec[key]:
        raise ValueError(
            'feature_columns contain different parse_spec for key '
            '{}. Given {} and {}'.format(key, value, parsing_spec[key]))
    parsing_spec.update(column_spec)
  return parsing_spec
@tf_export('feature_column.embedding_column')
def embedding_column(categorical_column,
                     dimension,
                     combiner='mean',
                     initializer=None,
                     ckpt_to_load_from=None,
                     tensor_name_in_ckpt=None,
                     max_norm=None,
                     trainable=True,
                     use_safe_embedding_lookup=True):
  """`DenseColumn` that converts from sparse, categorical input.
  Embeds the sparse IDs produced by `categorical_column` into dense vectors
  of size `dimension`, e.g. so they can be fed to a DNN. The input must be a
  `CategoricalColumn` created by one of the `categorical_column_*` functions.
  Example:
  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=1000000, default_value=0)
  columns = [embedding_column(video_id, 9),...]
  estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
  ```
  Args:
    categorical_column: A `CategoricalColumn` created by a
      `categorical_column_with_*` function. This column produces the sparse
      IDs that are inputs to the embedding lookup.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
      'mean' the default. See `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `truncated_normal_initializer` with mean `0.0` and standard deviation
      `1/sqrt(dimension)`.
    ckpt_to_load_from: String representing checkpoint name/pattern from which
      to restore column weights. Required if `tensor_name_in_ckpt` is not
      `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
      which to restore the column weights. Required if `ckpt_to_load_from` is
      not `None`.
    max_norm: If not `None`, embedding values are l2-normalized to this value.
    trainable: Whether or not the embedding is trainable. Default is True.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse, which guards against empty rows and
      non-positive ids/weights at some extra compute cost. Only applies to
      rank 2 (NxM) shaped input tensors.
  Returns:
    `DenseColumn` that converts from sparse input.
  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
      is specified.
    ValueError: if `initializer` is specified and is not callable.
    RuntimeError: If eager execution is enabled.
  """
  if dimension is None or dimension < 1:
    raise ValueError('Invalid dimension {}.'.format(dimension))
  # The two checkpoint-restore arguments only make sense as a pair.
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')
  if initializer is None:
    initializer = initializers.truncated_normal(
        mean=0.0, stddev=1 / math.sqrt(dimension))
  elif not callable(initializer):
    raise ValueError('initializer must be callable if specified. '
                     'Embedding of column_name: {}'.format(
                         categorical_column.name))
  return EmbeddingColumn(
      categorical_column=categorical_column,
      dimension=dimension,
      combiner=combiner,
      initializer=initializer,
      ckpt_to_load_from=ckpt_to_load_from,
      tensor_name_in_ckpt=tensor_name_in_ckpt,
      max_norm=max_norm,
      trainable=trainable,
      use_safe_embedding_lookup=use_safe_embedding_lookup)
@tf_export(v1=['feature_column.shared_embedding_columns'])
def shared_embedding_columns(categorical_columns,
                             dimension,
                             combiner='mean',
                             initializer=None,
                             shared_embedding_collection_name=None,
                             ckpt_to_load_from=None,
                             tensor_name_in_ckpt=None,
                             max_norm=None,
                             trainable=True,
                             use_safe_embedding_lookup=True):
  """List of dense columns that convert from sparse, categorical input.
  This is similar to `embedding_column`, except that it produces a list of
  embedding columns that share the same embedding weights.
  Use this when your inputs are sparse and of the same type (e.g. watched and
  impression video IDs that share the same vocabulary), and you want to convert
  them to a dense representation (e.g., to feed to a DNN).
  Inputs must be a list of categorical columns created by any of the
  `categorical_column_*` function. They must all be of the same type and have
  the same arguments except `key`. E.g. they can be
  categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
  all columns could also be weighted_categorical_column.
  Here is an example embedding of two features for a DNNClassifier model:
  ```python
  watched_video_id = categorical_column_with_vocabulary_file(
      'watched_video_id', video_vocabulary_file, video_vocabulary_size)
  impression_video_id = categorical_column_with_vocabulary_file(
      'impression_video_id', video_vocabulary_file, video_vocabulary_size)
  columns = shared_embedding_columns(
      [watched_video_id, impression_video_id], dimension=10)
  estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
  label_column = ...
  def input_fn():
    features = tf.io.parse_example(
        ..., features=make_parse_example_spec(columns + [label_column]))
    labels = features.pop(label_column.name)
    return features, labels
  estimator.train(input_fn=input_fn, steps=100)
  ```
  Here is an example using `shared_embedding_columns` with model_fn:
  ```python
  def model_fn(features, ...):
    watched_video_id = categorical_column_with_vocabulary_file(
        'watched_video_id', video_vocabulary_file, video_vocabulary_size)
    impression_video_id = categorical_column_with_vocabulary_file(
        'impression_video_id', video_vocabulary_file, video_vocabulary_size)
    columns = shared_embedding_columns(
        [watched_video_id, impression_video_id], dimension=10)
    dense_tensor = input_layer(features, columns)
    # Form DNN layers, calculate loss, and return EstimatorSpec.
    ...
  ```
  Args:
    categorical_columns: List of categorical columns created by a
      `categorical_column_with_*` function. These columns produce the sparse IDs
      that are inputs to the embedding lookup. All columns must be of the same
      type and have the same arguments except `key`. E.g. they can be
      categorical_column_with_vocabulary_file with the same vocabulary_file.
      Some or all columns could also be weighted_categorical_column.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries in
      a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
      'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column. For more information, see
      `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `truncated_normal_initializer` with mean `0.0` and
      standard deviation `1/sqrt(dimension)`.
    shared_embedding_collection_name: Optional name of the collection where
      shared embedding weights are added. If not given, a reasonable name will
      be chosen based on the names of `categorical_columns`. This is also used
      in `variable_scope` when creating shared embedding weights.
    ckpt_to_load_from: String representing checkpoint name/pattern from which to
      restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
      to restore the column weights. Required if `ckpt_to_load_from` is not
      `None`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
      than this value, before combining.
    trainable: Whether or not the embedding is trainable. Default is True.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
      there are no empty rows and all weights and ids are positive at the
      expense of extra compute cost. This only applies to rank 2 (NxM) shaped
      input tensors. Defaults to true, consider turning off if the above checks
      are not needed. Note that having empty rows will not trigger any error
      though the output result might be 0 or omitted.
  Returns:
    A list of dense columns that converts from sparse input. The order of
    results follows the ordering of `categorical_columns`.
  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if `categorical_columns` is empty.
    ValueError: if any of the given `categorical_columns` is of different type
      or has different arguments than the others.
    ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
      is specified.
    ValueError: if `initializer` is specified and is not callable.
    RuntimeError: if eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('shared_embedding_columns are not supported when eager '
                       'execution is enabled.')
  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')
  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified.')
  if initializer is None:
    initializer = initializers.truncated_normal(
        mean=0.0, stddev=1. / math.sqrt(dimension))
  # Fail with a clear message rather than an IndexError below.
  if not categorical_columns:
    raise ValueError('categorical_columns must not be empty.')
  # Sort the columns so the default collection name is deterministic even if the
  # user passes columns from an unsorted collection, such as dict.values().
  sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
  c0 = sorted_columns[0]
  # Validate the column type before touching its protected `_num_buckets`
  # attribute; otherwise a non-categorical input would surface as an opaque
  # AttributeError instead of the intended ValueError.
  if not isinstance(c0, fc_old._CategoricalColumn):  # pylint: disable=protected-access
    raise ValueError(
        'All categorical_columns must be subclasses of _CategoricalColumn. '
        'Given: {}, of type: {}'.format(c0, type(c0)))
  num_buckets = c0._num_buckets  # pylint: disable=protected-access
  # Unwrap weighted/sequence wrappers so the type and bucket-count comparisons
  # below operate on the underlying categorical columns.
  while isinstance(
      c0, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn,  # pylint: disable=protected-access
           fc_old._SequenceCategoricalColumn, SequenceCategoricalColumn)):  # pylint: disable=protected-access
    c0 = c0.categorical_column
  for c in sorted_columns[1:]:
    while isinstance(
        c, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn,  # pylint: disable=protected-access
            fc_old._SequenceCategoricalColumn, SequenceCategoricalColumn)):  # pylint: disable=protected-access
      c = c.categorical_column
    if not isinstance(c, type(c0)):
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same type, or be weighted_categorical_column or sequence column '
          'of the same type. Given column: {} of type: {} does not match given '
          'column: {} of type: {}'.format(c0, type(c0), c, type(c)))
    if num_buckets != c._num_buckets:  # pylint: disable=protected-access
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same number of buckets. Given column: {} with buckets: {} does '
          'not match column: {} with buckets: {}'.format(
              c0, num_buckets, c, c._num_buckets))  # pylint: disable=protected-access
  if not shared_embedding_collection_name:
    shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
    shared_embedding_collection_name += '_shared_embedding'
  result = []
  for column in categorical_columns:
    result.append(
        fc_old._SharedEmbeddingColumn(  # pylint: disable=protected-access
            categorical_column=column,
            initializer=initializer,
            dimension=dimension,
            combiner=combiner,
            shared_embedding_collection_name=shared_embedding_collection_name,
            ckpt_to_load_from=ckpt_to_load_from,
            tensor_name_in_ckpt=tensor_name_in_ckpt,
            max_norm=max_norm,
            trainable=trainable,
            use_safe_embedding_lookup=use_safe_embedding_lookup))
  return result
@tf_export('feature_column.shared_embeddings', v1=[])
def shared_embedding_columns_v2(categorical_columns,
                                dimension,
                                combiner='mean',
                                initializer=None,
                                shared_embedding_collection_name=None,
                                ckpt_to_load_from=None,
                                tensor_name_in_ckpt=None,
                                max_norm=None,
                                trainable=True,
                                use_safe_embedding_lookup=True):
  """List of dense columns that convert from sparse, categorical input.
  This is similar to `embedding_column`, except that it produces a list of
  embedding columns that share the same embedding weights.
  Use this when your inputs are sparse and of the same type (e.g. watched and
  impression video IDs that share the same vocabulary), and you want to convert
  them to a dense representation (e.g., to feed to a DNN).
  Inputs must be a list of categorical columns created by any of the
  `categorical_column_*` function. They must all be of the same type and have
  the same arguments except `key`. E.g. they can be
  categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
  all columns could also be weighted_categorical_column.
  Here is an example embedding of two features for a DNNClassifier model:
  ```python
  watched_video_id = categorical_column_with_vocabulary_file(
      'watched_video_id', video_vocabulary_file, video_vocabulary_size)
  impression_video_id = categorical_column_with_vocabulary_file(
      'impression_video_id', video_vocabulary_file, video_vocabulary_size)
  columns = shared_embedding_columns(
      [watched_video_id, impression_video_id], dimension=10)
  estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
  label_column = ...
  def input_fn():
    features = tf.io.parse_example(
        ..., features=make_parse_example_spec(columns + [label_column]))
    labels = features.pop(label_column.name)
    return features, labels
  estimator.train(input_fn=input_fn, steps=100)
  ```
  Here is an example using `shared_embedding_columns` with model_fn:
  ```python
  def model_fn(features, ...):
    watched_video_id = categorical_column_with_vocabulary_file(
        'watched_video_id', video_vocabulary_file, video_vocabulary_size)
    impression_video_id = categorical_column_with_vocabulary_file(
        'impression_video_id', video_vocabulary_file, video_vocabulary_size)
    columns = shared_embedding_columns(
        [watched_video_id, impression_video_id], dimension=10)
    dense_tensor = input_layer(features, columns)
    # Form DNN layers, calculate loss, and return EstimatorSpec.
    ...
  ```
  Args:
    categorical_columns: List of categorical columns created by a
      `categorical_column_with_*` function. These columns produce the sparse IDs
      that are inputs to the embedding lookup. All columns must be of the same
      type and have the same arguments except `key`. E.g. they can be
      categorical_column_with_vocabulary_file with the same vocabulary_file.
      Some or all columns could also be weighted_categorical_column.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
      'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column. For more information, see
      `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `truncated_normal_initializer` with mean `0.0` and standard
      deviation `1/sqrt(dimension)`.
    shared_embedding_collection_name: Optional collective name of these columns.
      If not given, a reasonable name will be chosen based on the names of
      `categorical_columns`.
    ckpt_to_load_from: String representing checkpoint name/pattern from which to
      restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
      which to restore the column weights. Required if `ckpt_to_load_from` is
      not `None`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is
      larger than this value, before combining.
    trainable: Whether or not the embedding is trainable. Default is True.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
      there are no empty rows and all weights and ids are positive at the
      expense of extra compute cost. This only applies to rank 2 (NxM) shaped
      input tensors. Defaults to true, consider turning off if the above checks
      are not needed. Note that having empty rows will not trigger any error
      though the output result might be 0 or omitted.
  Returns:
    A list of dense columns that converts from sparse input. The order of
    results follows the ordering of `categorical_columns`.
  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if `categorical_columns` is empty.
    ValueError: if any of the given `categorical_columns` is of different type
      or has different arguments than the others.
    ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
      is specified.
    ValueError: if `initializer` is specified and is not callable.
    RuntimeError: if eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('shared_embedding_columns are not supported when eager '
                       'execution is enabled.')
  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')
  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified.')
  if initializer is None:
    initializer = initializers.truncated_normal(
        mean=0.0, stddev=1. / math.sqrt(dimension))
  # Fail with a clear message rather than an IndexError below.
  if not categorical_columns:
    raise ValueError('categorical_columns must not be empty.')
  # Sort the columns so the default collection name is deterministic even if the
  # user passes columns from an unsorted collection, such as dict.values().
  sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
  c0 = sorted_columns[0]
  # Validate the column type before reading `num_buckets`; otherwise a
  # non-categorical input would surface as an opaque AttributeError instead of
  # the intended ValueError.
  if not isinstance(c0, CategoricalColumn):
    raise ValueError(
        'All categorical_columns must be subclasses of CategoricalColumn. '
        'Given: {}, of type: {}'.format(c0, type(c0)))
  num_buckets = c0.num_buckets
  # Unwrap weighted/sequence wrappers so the type comparisons below operate on
  # the underlying categorical columns.
  while isinstance(c0, (WeightedCategoricalColumn, SequenceCategoricalColumn)):
    c0 = c0.categorical_column
  for c in sorted_columns[1:]:
    while isinstance(c, (WeightedCategoricalColumn, SequenceCategoricalColumn)):
      c = c.categorical_column
    if not isinstance(c, type(c0)):
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same type, or be weighted_categorical_column or sequence column '
          'of the same type. Given column: {} of type: {} does not match given '
          'column: {} of type: {}'.format(c0, type(c0), c, type(c)))
    if num_buckets != c.num_buckets:
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same number of buckets. Given column: {} with buckets: {} does '
          'not match column: {} with buckets: {}'.format(
              c0, num_buckets, c, c.num_buckets))
  if not shared_embedding_collection_name:
    shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
    shared_embedding_collection_name += '_shared_embedding'
  # One creator holds the shared weights; each call wraps one input column.
  column_creator = SharedEmbeddingColumnCreator(
      dimension, initializer, ckpt_to_load_from, tensor_name_in_ckpt,
      num_buckets, trainable, shared_embedding_collection_name,
      use_safe_embedding_lookup)
  result = []
  for column in categorical_columns:
    result.append(
        column_creator(
            categorical_column=column, combiner=combiner, max_norm=max_norm))
  return result
@tf_export('feature_column.numeric_column')
def numeric_column(key,
                   shape=(1,),
                   default_value=None,
                   dtype=dtypes.float32,
                   normalizer_fn=None):
  """Represents real valued or numerical features.
  Example:
  ```python
  price = numeric_column('price')
  columns = [price, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  # or
  bucketized_price = bucketized_column(price, boundaries=[...])
  columns = [bucketized_price, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```
  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    shape: An iterable of integers specifying the shape of the `Tensor`; a
      bare integer means a single dimension of that width. The `Tensor`
      representing the column will have the shape of [batch_size] + `shape`.
    default_value: A single value compatible with `dtype`, or an iterable of
      such values whose shape equals `shape`, used during `tf.Example` parsing
      when data is missing. `None` makes `tf.io.parse_example` fail if an
      example does not contain this column.
    dtype: The type of values, `tf.float32` by default. Must be a
      non-quantized, real integer or floating point type.
    normalizer_fn: Optional function applied to the tensor after
      `default_value` is applied for parsing; takes the input `Tensor` and
      returns the output `Tensor` (e.g. lambda x: (x - 3.0) / 4.2). Despite
      the name, it may perform any Tensorflow transformation.
  Returns:
    A `NumericColumn`.
  Raises:
    TypeError: if any dimension in shape is not an int
    ValueError: if any dimension in shape is not a positive integer
    TypeError: if `default_value` is an iterable but not compatible with `shape`
    TypeError: if `default_value` is not compatible with `dtype`.
    ValueError: if `dtype` is not convertible to `tf.float32`.
  """
  shape = _check_shape(shape, key)
  # Quantized/complex dtypes cannot be represented as real-valued features.
  if not dtype.is_integer and not dtype.is_floating:
    raise ValueError('dtype must be convertible to float. '
                     'dtype: {}, key: {}'.format(dtype, key))
  default_value = fc_utils.check_default_value(
      shape, default_value, dtype, key)
  if not (normalizer_fn is None or callable(normalizer_fn)):
    raise TypeError(
        'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
  fc_utils.assert_key_is_string(key)
  return NumericColumn(
      key,
      shape=shape,
      default_value=default_value,
      dtype=dtype,
      normalizer_fn=normalizer_fn)
@tf_export('feature_column.bucketized_column')
def bucketized_column(source_column, boundaries):
  """Represents discretized dense input bucketed by `boundaries`.
  Buckets include the left boundary, and exclude the right boundary. Namely,
  `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
  `[1., 2.)`, and `[2., +inf)`.
  For example, if the inputs are
  ```python
  boundaries = [0, 10, 100]
  input tensor = [[-5, 10000]
                  [150, 10]
                  [5, 100]]
  ```
  then the output will be
  ```python
  output = [[0, 3]
            [3, 2]
            [1, 3]]
  ```
  Example:
  ```python
  price = tf.feature_column.numeric_column('price')
  bucketized_price = tf.feature_column.bucketized_column(
      price, boundaries=[...])
  columns = [bucketized_price, ...]
  features = tf.io.parse_example(
      ..., features=tf.feature_column.make_parse_example_spec(columns))
  dense_tensor = tf.keras.layers.DenseFeatures(columns)(features)
  ```
  A `bucketized_column` can also be crossed with another categorical column
  using `crossed_column`:
  ```python
  price = tf.feature_column.numeric_column('price')
  # bucketized_column converts numerical feature to a categorical one.
  bucketized_price = tf.feature_column.bucketized_column(
      price, boundaries=[...])
  # 'keywords' is a string feature.
  price_x_keywords = tf.feature_column.crossed_column(
      [bucketized_price, 'keywords'], 50K)
  columns = [price_x_keywords, ...]
  features = tf.io.parse_example(
      ..., features=tf.feature_column.make_parse_example_spec(columns))
  dense_tensor = tf.keras.layers.DenseFeatures(columns)(features)
  linear_model = tf.keras.experimental.LinearModel(units=...)(dense_tensor)
  ```
  Args:
    source_column: A one-dimensional dense column which is generated with
      `numeric_column`.
    boundaries: A sorted list or tuple of floats specifying the boundaries.
  Returns:
    A `BucketizedColumn`.
  Raises:
    ValueError: If `source_column` is not a numeric column, or if it is not
      one-dimensional.
    ValueError: If `boundaries` is not a sorted list or tuple.
  """
  if not isinstance(source_column, (NumericColumn, fc_old._NumericColumn)):  # pylint: disable=protected-access
    raise ValueError(
        'source_column must be a column generated with numeric_column(). '
        'Given: {}'.format(source_column))
  if len(source_column.shape) > 1:
    raise ValueError(
        'source_column must be one-dimensional column. '
        'Given: {}'.format(source_column))
  if not boundaries:
    raise ValueError('boundaries must not be empty.')
  if not isinstance(boundaries, (list, tuple)):
    raise ValueError('boundaries must be a sorted list.')
  # Boundaries must be strictly increasing; compare each adjacent pair.
  if any(lo >= hi for lo, hi in zip(boundaries, boundaries[1:])):
    raise ValueError('boundaries must be a sorted list.')
  return BucketizedColumn(source_column, tuple(boundaries))
@tf_export('feature_column.categorical_column_with_hash_bucket')
def categorical_column_with_hash_bucket(key,
                                        hash_bucket_size,
                                        dtype=dtypes.string):
  """Represents sparse feature where ids are set by hashing.
  Use this when your sparse features are in string or integer format, and you
  want to distribute your inputs into a finite number of buckets by hashing.
  output_id = Hash(input_feature_string) % bucket_size for string type input.
  For int type input, the value is converted to its string representation first
  and then hashed by the same formula.
  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
  and `''` for string, which will be dropped by this feature column.
  Example:
  ```python
  keywords = categorical_column_with_hash_bucket("keywords", 10K)
  columns = [keywords, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  # or
  keywords_embedded = embedding_column(keywords, 16)
  columns = [keywords_embedded, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```
  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    hash_bucket_size: An int >= 1. The number of buckets.
    dtype: The type of features. Only string and integer types are supported.
  Returns:
    A `HashedCategoricalColumn`.
  Raises:
    ValueError: `hash_bucket_size` is unset or less than 1.
    ValueError: `dtype` is neither string nor integer.
  """
  if hash_bucket_size is None:
    raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))
  # A single bucket (size 1) is degenerate but allowed.
  if hash_bucket_size < 1:
    raise ValueError('hash_bucket_size must be at least 1. '
                     'hash_bucket_size: {}, key: {}'.format(
                         hash_bucket_size, key))
  fc_utils.assert_key_is_string(key)
  fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  return HashedCategoricalColumn(key, hash_bucket_size, dtype)
@tf_export(v1=['feature_column.categorical_column_with_vocabulary_file'])
def categorical_column_with_vocabulary_file(key,
                                            vocabulary_file,
                                            vocabulary_size=None,
                                            num_oov_buckets=0,
                                            default_value=None,
                                            dtype=dtypes.string):
  """A `CategoricalColumn` with a vocabulary file.
  Use this when your inputs are in string or integer format, and you have a
  vocabulary file that maps each value to an integer ID. By default,
  out-of-vocabulary values are ignored. Use either (but not both) of
  `num_oov_buckets` and `default_value` to specify how to include
  out-of-vocabulary values.
  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
  and `''` for string, which will be dropped by this feature column.
  Example with `num_oov_buckets`:
  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
      num_oov_buckets=5)
  columns = [states, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```
  Example with `default_value` (e.g. with a vocabulary file whose first line
  is a catch-all 'XX' entry):
  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
      default_value=0)
  columns = [states, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```
  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than length of `vocabulary_file`; if less, later values are
      ignored. If None, it is set to the length of `vocabulary_file`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
      the input value. A positive `num_oov_buckets` can not be specified with
      `default_value`.
    default_value: The integer ID value to return for out-of-vocabulary feature
      values, defaults to `-1`. This can not be specified with a positive
      `num_oov_buckets`.
    dtype: The type of features. Only string and integer types are supported.
  Returns:
    A `CategoricalColumn` with a vocabulary file.
  Raises:
    ValueError: `vocabulary_file` is missing or cannot be opened.
    ValueError: `vocabulary_size` is missing or < 1.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: `dtype` is neither string nor integer.
  """
  # Delegate to the v2 implementation; only the argument order differs
  # between the two signatures, so pass the trailing arguments by keyword.
  return categorical_column_with_vocabulary_file_v2(
      key,
      vocabulary_file,
      vocabulary_size=vocabulary_size,
      dtype=dtype,
      default_value=default_value,
      num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_vocabulary_file', v1=[])
def categorical_column_with_vocabulary_file_v2(key,
                                               vocabulary_file,
                                               vocabulary_size=None,
                                               dtype=dtypes.string,
                                               default_value=None,
                                               num_oov_buckets=0):
  """A `CategoricalColumn` backed by a vocabulary file.

  Use this when your inputs are in string or integer format and a vocabulary
  file maps each value to an integer ID (the value's line number in the
  file). Out-of-vocabulary values are dropped by default; to keep them,
  supply exactly one of `num_oov_buckets` (OOV values are hashed into
  `[vocabulary_size, vocabulary_size + num_oov_buckets)`) or `default_value`
  (every OOV value maps to that single ID).

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string, which will be dropped by this feature column.

  Example with `num_oov_buckets` (a 50-line state-abbreviation file; in-file
  values get IDs 0-49, everything else is hashed into IDs 50-54):

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
      num_oov_buckets=5)
  columns = [states, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Example with `default_value` (a 51-line file whose first line is a literal
  `'XX'` OOV token; both `'XX'` and unlisted values map to ID 0, the rest to
  their line numbers 1-50):

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
      default_value=0)
  columns = [states, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  To make an embedding with either:

  ```python
  columns = [embedding_column(states, 3),...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be
      no greater than length of `vocabulary_file`; if less than length, later
      values are ignored. If None, it is set to the length of
      `vocabulary_file`.
    dtype: The type of features. Only string and integer types are supported.
    default_value: The integer ID value to return for out-of-vocabulary
      feature values, defaults to `-1`. This can not be specified with a
      positive `num_oov_buckets`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
      the input value. A positive `num_oov_buckets` can not be specified with
      `default_value`.

  Returns:
    A `CategoricalColumn` with a vocabulary file.

  Raises:
    ValueError: `vocabulary_file` is missing or cannot be opened.
    ValueError: `vocabulary_size` is missing or < 1.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: `dtype` is neither string nor integer.
  """
  if not vocabulary_file:
    raise ValueError('Missing vocabulary_file in {}.'.format(key))

  if vocabulary_size is None:
    # Infer the vocabulary size by counting the lines of the file.
    if not gfile.Exists(vocabulary_file):
      raise ValueError('vocabulary_file in {} does not exist.'.format(key))
    with gfile.GFile(vocabulary_file, mode='rb') as vocabulary:
      vocabulary_size = sum(1 for _ in vocabulary)
    logging.info(
        'vocabulary_size = %d in %s is inferred from the number of elements '
        'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)

  # `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
  if vocabulary_size < 1:
    raise ValueError('Invalid vocabulary_size in {}.'.format(key))

  if num_oov_buckets:
    if default_value is not None:
      raise ValueError(
          'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
              key))
    if num_oov_buckets < 0:
      raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
          num_oov_buckets, key))

  fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  fc_utils.assert_key_is_string(key)

  # Normalize the two optional arguments before constructing the column.
  if num_oov_buckets is None:
    num_oov_buckets = 0
  if default_value is None:
    default_value = -1
  return VocabularyFileCategoricalColumn(
      key=key,
      vocabulary_file=vocabulary_file,
      vocabulary_size=vocabulary_size,
      num_oov_buckets=num_oov_buckets,
      default_value=default_value,
      dtype=dtype)
@tf_export('feature_column.categorical_column_with_vocabulary_list')
def categorical_column_with_vocabulary_list(key,
                                            vocabulary_list,
                                            dtype=None,
                                            default_value=-1,
                                            num_oov_buckets=0):
  """A `CategoricalColumn` with an in-memory vocabulary.

  Each feature value is mapped to the index of its value (if present) in
  `vocabulary_list`. Out-of-vocabulary values are dropped by default; to
  keep them, supply exactly one of `num_oov_buckets` (OOV values are hashed
  into `[len(vocabulary_list), len(vocabulary_list) + num_oov_buckets)`) or
  `default_value` (every OOV value maps to that single ID).

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string, which will be dropped by this feature column.

  Example with `num_oov_buckets` (in-list values get IDs 0-3, everything
  else is hashed into IDs 4-5):

  ```python
  colors = categorical_column_with_vocabulary_list(
      key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
      num_oov_buckets=2)
  columns = [colors, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  Example with `default_value` (in-list values get IDs 0-4, everything else
  maps to ID 0):

  ```python
  colors = categorical_column_with_vocabulary_list(
      key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
  columns = [colors, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  To make an embedding with either:

  ```python
  columns = [embedding_column(colors, 3),...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_list: An ordered iterable defining the vocabulary. Each
      feature is mapped to the index of its value (if present) in
      `vocabulary_list`. Must be castable to `dtype`.
    dtype: The type of features. Only string and integer types are
      supported. If `None`, it will be inferred from `vocabulary_list`.
    default_value: The integer ID value to return for out-of-vocabulary
      feature values, defaults to `-1`. This can not be specified with a
      positive `num_oov_buckets`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on
      a hash of the input value. A positive `num_oov_buckets` can not be
      specified with `default_value`.

  Returns:
    A `CategoricalColumn` with in-memory vocabulary.

  Raises:
    ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: if `dtype` is not integer or string.
  """
  if (vocabulary_list is None) or (len(vocabulary_list) < 1):
    raise ValueError(
        'vocabulary_list {} must be non-empty, column_name: {}'.format(
            vocabulary_list, key))
  if len(set(vocabulary_list)) != len(vocabulary_list):
    raise ValueError(
        'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
            vocabulary_list, key))

  # Infer the vocabulary's dtype from its values.
  inferred_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)

  if num_oov_buckets:
    if default_value != -1:
      raise ValueError(
          'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
              key))
    if num_oov_buckets < 0:
      raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
          num_oov_buckets, key))

  fc_utils.assert_string_or_int(
      inferred_dtype, prefix='column_name: {} vocabulary'.format(key))
  if dtype is None:
    dtype = inferred_dtype
  elif dtype.is_integer != inferred_dtype.is_integer:
    raise ValueError(
        'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
            dtype, inferred_dtype, key))
  fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  fc_utils.assert_key_is_string(key)

  return VocabularyListCategoricalColumn(
      key=key,
      vocabulary_list=tuple(vocabulary_list),
      dtype=dtype,
      default_value=default_value,
      num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_identity')
def categorical_column_with_identity(key, num_buckets, default_value=None):
  """A `CategoricalColumn` that returns identity values.

  Use this when your inputs are integers in the range `[0, num_buckets)` and
  the input value itself should be used as the categorical ID. Values
  outside this range will result in `default_value` if specified, otherwise
  it will fail.

  Typically, this is used for contiguous ranges of integer indexes, but it
  doesn't have to be. This might be inefficient, however, if many of the IDs
  are unused. Consider `categorical_column_with_hash_bucket` in that case.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string, which will be dropped by this feature column.

  In the following examples, each input in the range `[0, 1000000)` is
  assigned the same value. All other inputs are assigned `default_value` 0.
  Note that a literal 0 in inputs will result in the same default ID.

  Linear model:

  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=1000000, default_value=0)
  columns = [video_id, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  Embedding for a DNN model:

  ```python
  columns = [embedding_column(video_id, 9),...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
    default_value: If set, values outside of range `[0, num_buckets)` will
      be replaced with this value. If not set, values >= num_buckets will
      cause a failure while values < 0 will be dropped.

  Returns:
    A `CategoricalColumn` that returns identity values.

  Raises:
    ValueError: if `num_buckets` is less than one.
    ValueError: if `default_value` is not in range `[0, num_buckets)`.
  """
  if num_buckets < 1:
    raise ValueError(
        'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
  if default_value is not None and not 0 <= default_value < num_buckets:
    raise ValueError(
        'default_value {} not in range [0, {}), column_name {}'.format(
            default_value, num_buckets, key))
  fc_utils.assert_key_is_string(key)
  return IdentityCategoricalColumn(
      key=key, number_buckets=num_buckets, default_value=default_value)
@tf_export('feature_column.indicator_column')
def indicator_column(categorical_column):
  """Represents the multi-hot representation of the given categorical column.

  - For a DNN model, `indicator_column` can be used to wrap any
    `categorical_column_*` (e.g., to feed to a DNN). Consider using
    `embedding_column` if the number of buckets/unique values is large.
  - For a wide (aka linear) model, `indicator_column` is the internal
    representation for a categorical column when the categorical column is
    passed directly (as any element in feature_columns) to `linear_model`.
    See `linear_model` for details.

  ```python
  name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda']))
  columns = [name, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)

  dense_tensor == [[1, 0, 0]]  # If "name" bytes_list is ["bob"]
  dense_tensor == [[1, 0, 1]]  # If "name" bytes_list is ["bob", "wanda"]
  dense_tensor == [[2, 0, 0]]  # If "name" bytes_list is ["bob", "bob"]
  ```

  Args:
    categorical_column: A `CategoricalColumn` which is created by
      `categorical_column_with_*` or `crossed_column` functions.

  Returns:
    An `IndicatorColumn`.

  Raises:
    ValueError: If `categorical_column` is not CategoricalColumn type.
  """
  supported_types = (CategoricalColumn, fc_old._CategoricalColumn)  # pylint: disable=protected-access
  if not isinstance(categorical_column, supported_types):
    raise ValueError(
        'Unsupported input type. Input must be a CategoricalColumn. '
        'Given: {}'.format(categorical_column))
  return IndicatorColumn(categorical_column)
@tf_export('feature_column.weighted_categorical_column')
def weighted_categorical_column(categorical_column,
                                weight_feature_key,
                                dtype=dtypes.float32):
  """Applies weight values to a `CategoricalColumn`.

  Use this when each of your sparse inputs has both an ID and a value. For
  example, if you're representing text documents as a collection of word
  frequencies, you can provide 2 parallel sparse input features ('terms'
  and 'frequencies' below).

  Example input `tf.Example` objects:

  ```proto
  [
    features {
      feature {
        key: "terms"
        value {bytes_list {value: "very" value: "model"}}
      }
      feature {
        key: "frequencies"
        value {float_list {value: 0.3 value: 0.1}}
      }
    },
    features {
      feature {
        key: "terms"
        value {bytes_list {value: "when" value: "course" value: "human"}}
      }
      feature {
        key: "frequencies"
        value {float_list {value: 0.4 value: 0.1 value: 0.2}}
      }
    }
  ]
  ```

  ```python
  categorical_column = categorical_column_with_hash_bucket(
      column_name='terms', hash_bucket_size=1000)
  weighted_column = weighted_categorical_column(
      categorical_column=categorical_column, weight_feature_key='frequencies')
  columns = [weighted_column, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  This assumes the input dictionary contains a `SparseTensor` for key
  'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must
  have the same indices and dense shape.

  Args:
    categorical_column: A `CategoricalColumn` created by
      `categorical_column_with_*` functions.
    weight_feature_key: String key for weight values.
    dtype: Type of weights, such as `tf.float32`. Only float and integer
      weights are supported.

  Returns:
    A `CategoricalColumn` composed of two sparse features: one represents id,
    the other represents weight (value) of the id feature in that example.

  Raises:
    ValueError: if `dtype` is not convertible to float.
  """
  is_numeric = (dtype is not None) and (dtype.is_integer or dtype.is_floating)
  if not is_numeric:
    raise ValueError('dtype {} is not convertible to float.'.format(dtype))
  return WeightedCategoricalColumn(
      categorical_column=categorical_column,
      weight_feature_key=weight_feature_key,
      dtype=dtype)
@tf_export('feature_column.crossed_column')
def crossed_column(keys, hash_bucket_size, hash_key=None):
  """Returns a column for performing crosses of categorical features.

  Crossed features will be hashed according to `hash_bucket_size`.
  Conceptually, the transformation can be thought of as:
    Hash(cartesian product of features) % `hash_bucket_size`

  For example, if the input features are:

  * SparseTensor referred by first key:

    ```python
    shape = [2, 2]
    {
        [0, 0]: "a"
        [1, 0]: "b"
        [1, 1]: "c"
    }
    ```

  * SparseTensor referred by second key:

    ```python
    shape = [2, 1]
    {
        [0, 0]: "d"
        [1, 0]: "e"
    }
    ```

  then crossed feature will look like:

  ```python
  shape = [2, 2]
  {
      [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
      [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
      [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
  }
  ```

  Here is an example to create a linear model with crosses of string
  features:

  ```python
  keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
  columns = [keywords_x_doc_terms, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  You could also use vocabulary lookup before crossing:

  ```python
  keywords = categorical_column_with_vocabulary_file(
      'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
  keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
  ```

  If an input feature is of numeric type, you can use
  `categorical_column_with_identity`, or `bucketized_column`, as in the
  example:

  ```python
  # vertical_id is an integer categorical feature.
  vertical_id = categorical_column_with_identity('vertical_id', 10K)
  price = numeric_column('price')
  # bucketized_column converts numerical feature to a categorical one.
  bucketized_price = bucketized_column(price, boundaries=[...])
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
  ```

  To use crossed column in DNN model, you need to add it in an embedding
  column as in this example:

  ```python
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
  vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
  dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
  ```

  Args:
    keys: An iterable identifying the features to be crossed. Each element
      can be either:
      * string: Will use the corresponding feature which must be of string
        type.
      * `CategoricalColumn`: Will use the transformed tensor produced by this
        column. Does not support hashed categorical column.
    hash_bucket_size: An int >= 1. The number of buckets.
    hash_key: Specify the hash_key that will be used by the
      `FingerprintCat64` function to combine the crosses fingerprints on
      SparseCrossOp (optional).

  Returns:
    A `CrossedColumn`.

  Raises:
    ValueError: If `len(keys) < 2`.
    ValueError: If any of the keys is neither a string nor
      `CategoricalColumn`.
    ValueError: If any of the keys is `HashedCategoricalColumn`.
    ValueError: If `hash_bucket_size < 1`.
  """
  # Fix: the previous message claimed hash_bucket_size must be "> 1", but the
  # check (and the Raises contract above) only rejects values < 1; a size of
  # 1 is accepted. The message now matches the enforced behavior.
  if not hash_bucket_size or hash_bucket_size < 1:
    raise ValueError('hash_bucket_size must be at least 1. '
                     'hash_bucket_size: {}'.format(hash_bucket_size))
  if not keys or len(keys) < 2:
    raise ValueError(
        'keys must be a list with length > 1. Given: {}'.format(keys))
  for key in keys:
    if (not isinstance(key, six.string_types) and
        not isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn))):  # pylint: disable=protected-access
      raise ValueError(
          'Unsupported key type. All keys must be either string, or '
          'categorical column except HashedCategoricalColumn. '
          'Given: {}'.format(key))
    # Hashing before crossing defeats the cross: reject hashed columns
    # explicitly with a pointed message.
    if isinstance(key,
                  (HashedCategoricalColumn, fc_old._HashedCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'categorical_column_with_hash_bucket is not supported for crossing. '
          'Hashing before crossing will increase probability of collision. '
          'Instead, use the feature name as a string. Given: {}'.format(key))
  return CrossedColumn(
      keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)
@six.add_metaclass(abc.ABCMeta)
class FeatureColumn(object):
  """Represents a feature column abstraction.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  To distinguish between the concept of a feature family and a specific binary
  feature within a family, we refer to a feature family like "country" as a
  feature column. For example, we can have a feature in a `tf.Example` format:
    {key: "country", value: [ "US" ]}
  In this example the value of feature is "US" and "country" refers to the
  column of the feature.

  This class is an abstract class. Users should not create instances of this.
  """

  @abc.abstractproperty
  def name(self):
    """Returns string. Used for naming."""
    pass

  def __lt__(self, other):
    """Allows feature columns to be sorted in Python 3 as they are in Python 2.

    Feature columns need to occasionally be sortable, for example when used as
    keys in a features dictionary passed to a layer.

    In CPython, `__lt__` must be defined for all objects in the
    sequence being sorted.

    If any objects in the sequence being sorted do not have an `__lt__` method
    compatible with feature column objects (such as strings), then CPython will
    fall back to using the `__gt__` method below.
    https://docs.python.org/3/library/stdtypes.html#list.sort

    Args:
      other: The other object to compare to.

    Returns:
      True if the string representation of this object is lexicographically
      less than the string representation of `other`. For FeatureColumn
      objects, this looks like "<__main__.FeatureColumn object at 0xa>".
    """
    return str(self) < str(other)

  def __gt__(self, other):
    """Allows feature columns to be sorted in Python 3 as they are in Python 2.

    Feature columns need to occasionally be sortable, for example when used as
    keys in a features dictionary passed to a layer.

    `__gt__` is called when the "other" object being compared during the sort
    does not have `__lt__` defined.

    Example:

    ```
    # __lt__ only class
    class A():
      def __lt__(self, other): return str(self) < str(other)

    a = A()
    a < "b"  # True
    "0" < a  # Error

    # __lt__ and __gt__ class
    class B():
      def __lt__(self, other): return str(self) < str(other)
      def __gt__(self, other): return str(self) > str(other)

    b = B()
    b < "c"  # True
    "0" < b  # True
    ```

    Args:
      other: The other object to compare to.

    Returns:
      True if the string representation of this object is lexicographically
      greater than the string representation of `other`. For FeatureColumn
      objects, this looks like "<__main__.FeatureColumn object at 0xa>".
    """
    return str(self) > str(other)

  @abc.abstractmethod
  def transform_feature(self, transformation_cache, state_manager):
    """Returns intermediate representation (usually a `Tensor`).

    Uses `transformation_cache` to create an intermediate representation
    (usually a `Tensor`) that other feature columns can use.

    Example usage of `transformation_cache`:
    Let's say a Feature column depends on raw feature ('raw') and another
    `FeatureColumn` (input_fc). To access corresponding `Tensor`s,
    transformation_cache will be used as follows:

    ```python
    raw_tensor = transformation_cache.get('raw', state_manager)
    fc_tensor = transformation_cache.get(input_fc, state_manager)
    ```

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      Transformed feature `Tensor`.
    """
    pass

  @abc.abstractproperty
  def parse_example_spec(self):
    """Returns a `tf.Example` parsing spec as dict.

    It is used for get_parsing_spec for `tf.io.parse_example`. Returned spec
    is a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and
    other supported objects. Please check documentation of
    `tf.io.parse_example` for all supported spec objects.

    Let's say a Feature column depends on raw feature ('raw') and another
    `FeatureColumn` (input_fc). One possible implementation of
    parse_example_spec is as follows:

    ```python
    spec = {'raw': tf.io.FixedLenFeature(...)}
    spec.update(input_fc.parse_example_spec)
    return spec
    ```
    """
    pass

  def create_state(self, state_manager):
    """Uses the `state_manager` to create state for the FeatureColumn.

    Args:
      state_manager: A `StateManager` to create / access resources such as
        lookup tables and variables.
    """
    pass

  @abc.abstractproperty
  def _is_v2_column(self):
    """Returns whether this FeatureColumn is fully conformant to the new API.

    This is needed for composition type cases where an EmbeddingColumn etc.
    might take in old categorical columns as input and then we want to use the
    old API.
    """
    pass

  @abc.abstractproperty
  def parents(self):
    """Returns a list of immediate raw feature and FeatureColumn dependencies.

    For example:
    # For the following feature columns
    a = numeric_column('f1')
    c = crossed_column(a, 'f2')
    # The expected parents are:
    a.parents = ['f1']
    c.parents = [a, 'f2']
    """
    pass

  def get_config(self):
    """Returns the config of the feature column.

    A FeatureColumn config is a Python dictionary (serializable) containing
    the configuration of a FeatureColumn. The same FeatureColumn can be
    reinstantiated later from this configuration.

    The config of a feature column does not include information about feature
    columns depending on it nor the FeatureColumn class name.

    Example with (de)serialization practices followed in this file:

    ```python
    class SerializationExampleFeatureColumn(
        FeatureColumn, collections.namedtuple(
            'SerializationExampleFeatureColumn',
            ('dimension', 'parent', 'dtype', 'normalizer_fn'))):

      def get_config(self):
        # Create a dict from the namedtuple.
        # Python attribute literals can be directly copied from / to the
        # config.  For example 'dimension', assuming it is an integer literal.
        config = dict(zip(self._fields, self))

        # (De)serialization of parent FeatureColumns should use the provided
        # (de)serialize_feature_column() methods that take care of de-duping.
        config['parent'] = serialize_feature_column(self.parent)

        # Many objects provide custom (de)serialization e.g: for tf.DType
        # tf.DType.name, tf.as_dtype() can be used.
        config['dtype'] = self.dtype.name

        # Non-trivial dependencies should be Keras-(de)serializable.
        config['normalizer_fn'] = generic_utils.serialize_keras_object(
            self.normalizer_fn)

        return config

      @classmethod
      def from_config(cls, config, custom_objects=None, columns_by_name=None):
        # This should do the inverse transform from `get_config` and construct
        # the namedtuple.
        kwargs = config.copy()
        kwargs['parent'] = deserialize_feature_column(
            config['parent'], custom_objects, columns_by_name)
        kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
        kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
            config['normalizer_fn'], custom_objects=custom_objects)
        return cls(**kwargs)
    ```

    Returns:
      A serializable Dict that can be used to deserialize the object with
      from_config.
    """
    return self._get_config()

  def _get_config(self):
    raise NotImplementedError('Must be implemented in subclasses.')

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """Creates a FeatureColumn from its config.

    This method should be the reverse of `get_config`, capable of
    instantiating the same FeatureColumn from the config dictionary. See
    `get_config` for an example of common (de)serialization practices
    followed in this file.

    TODO(b/118939620): This is a private method until consensus is reached on
    supporting object deserialization deduping within Keras.

    Args:
      config: A Dict config acquired with `get_config`.
      custom_objects: Optional dictionary mapping names (strings) to custom
        classes or functions to be considered during deserialization.
      columns_by_name: A Dict[String, FeatureColumn] of existing columns in
        order to avoid duplication. Should be passed to any calls to
        deserialize_feature_column().

    Returns:
      A FeatureColumn for the input config.
    """
    return cls._from_config(config, custom_objects, columns_by_name)

  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    raise NotImplementedError('Must be implemented in subclasses.')
class DenseColumn(FeatureColumn):
  """Represents a column which can be represented as `Tensor`.

  Some examples of this type are: numeric_column, embedding_column,
  indicator_column.

  Subclasses must implement `variable_shape` and `get_dense_tensor`.
  """

  @abc.abstractproperty
  def variable_shape(self):
    """`TensorShape` of `get_dense_tensor`, without batch dimension."""
    pass

  @abc.abstractmethod
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns a `Tensor`.

    The output of this function will be used by model-builder-functions. For
    example the pseudo code of `input_layer` will be like:

    ```python
    def input_layer(features, feature_columns, ...):
      outputs = [fc.get_dense_tensor(...) for fc in feature_columns]
      return tf.concat(outputs)
    ```

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      `Tensor` of shape [batch_size] + `variable_shape`.
    """
    pass
def is_feature_column_v2(feature_columns):
  """Returns True if all feature columns are V2."""
  # Short-circuits on the first non-V2 column, like the original loop: the
  # `_is_v2_column` attribute is only read after the isinstance check passes.
  return all(
      isinstance(column, FeatureColumn) and column._is_v2_column  # pylint: disable=protected-access
      for column in feature_columns)
def _create_weighted_sum(column, transformation_cache, state_manager,
                         sparse_combiner, weight_var):
  """Dispatches to the categorical or dense weighted-sum helper for linear_model."""
  if isinstance(column, CategoricalColumn):
    return _create_categorical_column_weighted_sum(
        column=column,
        transformation_cache=transformation_cache,
        state_manager=state_manager,
        sparse_combiner=sparse_combiner,
        weight_var=weight_var)
  # Any non-categorical column is treated as dense.
  return _create_dense_column_weighted_sum(
      column=column,
      transformation_cache=transformation_cache,
      state_manager=state_manager,
      weight_var=weight_var)
def _create_dense_column_weighted_sum(column, transformation_cache,
                                      state_manager, weight_var):
  """Create a weighted sum of a dense column for linear_model."""
  dense_tensor = column.get_dense_tensor(transformation_cache, state_manager)
  # Flatten everything but the batch dimension so the matmul against the
  # weight variable sees a rank-2 input.
  feature_count = column.variable_shape.num_elements()
  batch_size = array_ops.shape(dense_tensor)[0]
  flattened = array_ops.reshape(
      dense_tensor, shape=(batch_size, feature_count))
  return math_ops.matmul(flattened, weight_var, name='weighted_sum')
class CategoricalColumn(FeatureColumn):
  """Represents a categorical feature.

  A categorical feature typically handled with a `tf.sparse.SparseTensor` of
  IDs.
  """

  # Pair of (id_tensor, weight_tensor) returned by `get_sparse_tensors`.
  IdWeightPair = collections.namedtuple(  # pylint: disable=invalid-name
      'IdWeightPair', ('id_tensor', 'weight_tensor'))

  @abc.abstractproperty
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    pass

  @abc.abstractmethod
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """Returns an IdWeightPair.

    `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
    weights.

    `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
    `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
    output of a `VarLenFeature` which is a ragged matrix.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      An `IdWeightPair` of (`id_tensor`, `weight_tensor`).
    """
    pass
def _create_categorical_column_weighted_sum(
    column, transformation_cache, state_manager, sparse_combiner, weight_var):
  # pylint: disable=g-doc-return-or-yield,g-doc-args
  """Computes the linear-model contribution of a categorical column.

  Implementation note for maintainers: the weighted sum is computed through
  `safe_embedding_lookup_sparse` for efficiency; the two are mathematically
  identical. Conceptually the categorical input is a multi-hot vector, so with
  input `x = [0 0 1]` and weights `w = [a b c]` the sum is `c`, and with
  `x = [0 1 1]` it is `b + c` — exactly an embedding lookup combined with
  sparse_combiner = "sum".
  """
  ids_and_weights = column.get_sparse_tensors(transformation_cache,
                                              state_manager)
  # Collapse any trailing dimensions so the ids are rank 2: (batch, ?).
  flat_ids = sparse_ops.sparse_reshape(ids_and_weights.id_tensor, [
      array_ops.shape(ids_and_weights.id_tensor)[0], -1
  ])
  flat_weights = ids_and_weights.weight_tensor
  if flat_weights is not None:
    # Weights, when present, must be reshaped the same way as the ids.
    flat_weights = sparse_ops.sparse_reshape(
        flat_weights, [array_ops.shape(flat_weights)[0], -1])
  return embedding_ops.safe_embedding_lookup_sparse(
      weight_var,
      flat_ids,
      sparse_weights=flat_weights,
      combiner=sparse_combiner,
      name='weighted_sum')
class SequenceDenseColumn(FeatureColumn):
  """Represents dense sequence data."""
  # Pair of (dense_tensor, sequence_length): the padded dense values together
  # with the true (unpadded) length of each sequence in the batch.
  TensorSequenceLengthPair = collections.namedtuple(  # pylint: disable=invalid-name
      'TensorSequenceLengthPair', ('dense_tensor', 'sequence_length'))
  @abc.abstractmethod
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """Returns a `TensorSequenceLengthPair`.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    """
    pass
class FeatureTransformationCache(object):
  """Handles caching of transformations while building the model.
  `FeatureColumn` specifies how to digest an input column to the network. Some
  feature columns require data transformations. This class caches those
  transformations.
  Some features may be used in more than one place. For example, one can use a
  bucketized feature by itself and a cross with it. In that case we
  should create only one bucketization op instead of creating ops for each
  feature column separately. To handle re-use of transformed columns,
  `FeatureTransformationCache` caches all previously transformed columns.
  Example:
  We're trying to use the following `FeatureColumn`s:
  ```python
  bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_buckets("keywords", ...)
  age_X_keywords = fc.crossed_column([bucketized_age, "keywords"])
  ... = linear_model(features,
                     [bucketized_age, keywords, age_X_keywords]
  ```
  If we transform each column independently, then we'll get duplication of
  bucketization (one for cross, one for bucketization itself).
  The `FeatureTransformationCache` eliminates this duplication.
  """
  def __init__(self, features):
    """Creates a `FeatureTransformationCache`.
    Args:
      features: A mapping from feature column to objects that are `Tensor` or
        `SparseTensor`, or can be converted to same via
        `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
        signifies a base feature (not-transformed). A `FeatureColumn` key
        means that this `Tensor` is the output of an existing `FeatureColumn`
        which can be reused.
    """
    # Shallow copy so later mutation of the caller's dict cannot change what
    # this cache resolves.
    self._features = features.copy()
    # Cache of transformed/converted tensors, keyed by `str` or
    # `FeatureColumn`.
    self._feature_tensors = {}
  def get(self, key, state_manager, training=None):
    """Returns a `Tensor` for the given key.
    A `str` key is used to access a base feature (not-transformed). When a
    `FeatureColumn` is passed, the transformed feature is returned if it
    already exists, otherwise the given `FeatureColumn` is asked to provide its
    transformed output, which is then cached.
    Args:
      key: a `str` or a `FeatureColumn`.
      state_manager: A StateManager object that holds the FeatureColumn state.
      training: Boolean indicating whether the column is being used in
        training mode. This argument is passed to the transform_feature method
        of any `FeatureColumn` that takes a `training` argument. For example, if
        a `FeatureColumn` performed dropout, it could expose a `training`
        argument to control whether the dropout should be applied.
    Returns:
      The transformed `Tensor` corresponding to the `key`.
    Raises:
      ValueError: if key is not found or a transformed `Tensor` cannot be
        computed.
    """
    if key in self._feature_tensors:
      # FeatureColumn is already transformed or converted.
      return self._feature_tensors[key]
    if key in self._features:
      # Raw (base) feature: convert / expand once, then cache the result.
      feature_tensor = self._get_raw_feature_as_tensor(key)
      self._feature_tensors[key] = feature_tensor
      return feature_tensor
    if isinstance(key, six.string_types):
      raise ValueError('Feature {} is not in features dictionary.'.format(key))
    if not isinstance(key, FeatureColumn):
      raise TypeError('"key" must be either a "str" or "FeatureColumn". '
                      'Provided: {}'.format(key))
    column = key
    logging.debug('Transforming feature_column %s.', column)
    # Some columns may need information about whether the transformation is
    # happening in training or prediction mode, but not all columns expose this
    # argument.
    try:
      transformed = column.transform_feature(
          self, state_manager, training=training)
    except TypeError:
      # NOTE(review): a TypeError raised *inside* transform_feature also lands
      # here and triggers this no-`training` retry — not only a signature
      # mismatch. Confirm this is intended before relying on it.
      transformed = column.transform_feature(self, state_manager)
    if transformed is None:
      raise ValueError('Column {} is not supported.'.format(column.name))
    self._feature_tensors[column] = transformed
    return transformed
  def _get_raw_feature_as_tensor(self, key):
    """Gets the raw_feature (keyed by `key`) as `tensor`.
    The raw feature is converted to (sparse) tensor and maybe expand dim.
    For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if
    the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will
    error out as it is not supported.
    Args:
      key: A `str` key to access the raw feature.
    Returns:
      A `Tensor` or `SparseTensor`.
    Raises:
      ValueError: if the raw feature has rank 0.
    """
    raw_feature = self._features[key]
    feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        raw_feature)
    # Expands a rank-1 (sparse) tensor to rank 2 by appending a size-1 column.
    def expand_dims(input_tensor):
      # Input_tensor must have rank 1.
      if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
        return sparse_ops.sparse_reshape(
            input_tensor, [array_ops.shape(input_tensor)[0], 1])
      else:
        return array_ops.expand_dims(input_tensor, -1)
    rank = feature_tensor.get_shape().ndims
    if rank is not None:
      # Static rank is known: validate and expand at graph-construction time.
      if rank == 0:
        raise ValueError(
            'Feature (key: {}) cannot have rank 0. Given: {}'.format(
                key, feature_tensor))
      return feature_tensor if rank != 1 else expand_dims(feature_tensor)
    # Handle dynamic rank: assert rank > 0 at run time, then conditionally
    # expand only when the runtime rank turns out to be 1.
    with ops.control_dependencies([
        check_ops.assert_positive(
            array_ops.rank(feature_tensor),
            message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
                key, feature_tensor))]):
      return control_flow_ops.cond(
          math_ops.equal(1, array_ops.rank(feature_tensor)),
          lambda: expand_dims(feature_tensor),
          lambda: feature_tensor)
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
  """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
  If `input_tensor` is already a `SparseTensor`, just return it.
  Args:
    input_tensor: A string or integer `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the resulting `SparseTensor`. If `None`, default value of
      `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
  Returns:
    A `SparseTensor` with the same shape as `input_tensor`.
  Raises:
    ValueError: when `input_tensor`'s rank is `None`.
  """
  dense = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(input_tensor)
  if isinstance(dense, sparse_tensor_lib.SparseTensor):
    # Already sparse; nothing to drop.
    return dense
  with ops.name_scope(None, 'to_sparse_input', (dense, ignore_value,)):
    if ignore_value is None:
      # Pick a dtype-appropriate "missing" sentinel.
      if dense.dtype == dtypes.string:
        # Exception due to TF strings are converted to numpy objects by
        # default.
        ignore_value = ''
      elif dense.dtype.is_integer:
        ignore_value = -1  # -1 has a special meaning of missing feature
      else:
        # `as_numpy_dtype` is a property; calling the resulting numpy type
        # with no arguments produces that dtype's default (zero) value.
        ignore_value = dense.dtype.as_numpy_dtype()
    ignore_value = math_ops.cast(
        ignore_value, dense.dtype, name='ignore_value')
    keep_indices = array_ops.where_v2(
        math_ops.not_equal(dense, ignore_value), name='indices')
    return sparse_tensor_lib.SparseTensor(
        indices=keep_indices,
        values=array_ops.gather_nd(dense, keep_indices, name='values'),
        dense_shape=array_ops.shape(
            dense, out_type=dtypes.int64, name='dense_shape'))
def _normalize_feature_columns(feature_columns):
  """Normalizes the `feature_columns` input.
  This method converts the `feature_columns` to list type as best as it can. In
  addition, verifies the type and other parts of feature_columns, required by
  downstream library.
  Args:
    feature_columns: The raw feature columns, usually passed by users.
  Returns:
    The normalized feature column list.
  Raises:
    ValueError: for any invalid inputs, such as empty, duplicated names, etc.
  """
  # Accept a bare column or any iterator; reject dicts explicitly, since
  # iterating a dict would silently yield only its keys.
  if isinstance(feature_columns, FeatureColumn):
    feature_columns = [feature_columns]
  if isinstance(feature_columns, collections_abc.Iterator):
    feature_columns = list(feature_columns)
  if isinstance(feature_columns, dict):
    raise ValueError('Expected feature_columns to be iterable, found dict.')
  # Validate element types before the emptiness check so a bad element is
  # reported in preference to "empty".
  for column in feature_columns:
    if not isinstance(column, FeatureColumn):
      raise ValueError('Items of feature_columns must be a FeatureColumn. '
                       'Given (type {}): {}.'.format(type(column), column))
  if not feature_columns:
    raise ValueError('feature_columns must not be empty.')
  # Column names must be unique; two columns with the same name would clash
  # in the features dict.
  seen_by_name = {}
  for column in feature_columns:
    if column.name in seen_by_name:
      raise ValueError('Duplicate feature column name found for columns: {} '
                       'and {}. This usually means that these columns refer to '
                       'same base feature. Either one must be discarded or a '
                       'duplicated but renamed item must be inserted in '
                       'features dict.'.format(column,
                                               seen_by_name[column.name]))
    seen_by_name[column.name] = column
  return sorted(feature_columns, key=lambda fc: fc.name)
class NumericColumn(
    DenseColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'NumericColumn',
        ('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
  """see `numeric_column`."""
  @property
  def _is_v2_column(self):
    # NumericColumn is defined in the v2 API, so it is always a v2 column.
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {
        self.key:
            parsing_ops.FixedLenFeature(self.shape, self.dtype,
                                        self.default_value)
    }
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    # Deprecated v1 alias for `parse_example_spec`.
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor):
    # Shared transform for the v1 and v2 paths: reject sparse input, apply
    # the optional user normalizer, then cast to float32 for dense layers.
    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError(
          'The corresponding Tensor of numerical column must be a Tensor. '
          'SparseTensor is not supported. key: {}'.format(self.key))
    if self.normalizer_fn is not None:
      input_tensor = self.normalizer_fn(input_tensor)
    return math_ops.cast(input_tensor, dtypes.float32)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    # Deprecated v1 transformation entry point.
    input_tensor = inputs.get(self.key)
    return self._transform_input_tensor(input_tensor)
  def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class.
    In this case, we apply the `normalizer_fn` to the input tensor.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      Normalized input tensor.
    Raises:
      ValueError: If a SparseTensor is passed in.
    """
    input_tensor = transformation_cache.get(self.key, state_manager)
    return self._transform_input_tensor(input_tensor)
  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    return tensor_shape.TensorShape(self.shape)
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _variable_shape(self):
    # Deprecated v1 alias for `variable_shape`.
    return self.variable_shape
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns dense `Tensor` representing numeric feature.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      Dense `Tensor` created within `transform_feature`.
    """
    # Feature has been already transformed. Return the intermediate
    # representation created by _transform_feature.
    return transformation_cache.get(self, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # Numeric columns create no variables; the variable-related arguments
    # are accepted only for interface compatibility and ignored.
    del weight_collections
    del trainable
    return inputs.get(self)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # Callables and dtypes are not directly serializable; encode explicitly.
    config['normalizer_fn'] = generic_utils.serialize_keras_object(
        self.normalizer_fn)
    config['dtype'] = self.dtype.name
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    # Reverse the encodings done by `get_config`.
    kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
        config['normalizer_fn'], custom_objects=custom_objects)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class BucketizedColumn(
    DenseColumn,
    CategoricalColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('BucketizedColumn',
                           ('source_column', 'boundaries'))):
  """See `bucketized_column`."""
  @property
  def _is_v2_column(self):
    # v2 only if the wrapped source column is itself a v2 column.
    return (isinstance(self.source_column, FeatureColumn) and
            self.source_column._is_v2_column)  # pylint: disable=protected-access
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_bucketized'.format(self.source_column.name)
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    # Parsing is delegated entirely to the source numeric column.
    return self.source_column.parse_example_spec
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.source_column._parse_example_spec  # pylint: disable=protected-access
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Returns bucketized categorical `source_column` tensor."""
    source_tensor = inputs.get(self.source_column)
    return math_ops._bucketize(  # pylint: disable=protected-access
        source_tensor,
        boundaries=self.boundaries)
  def transform_feature(self, transformation_cache, state_manager):
    """Returns bucketized categorical `source_column` tensor."""
    # Maps each source value to the index of the bucket it falls into.
    source_tensor = transformation_cache.get(self.source_column, state_manager)
    return math_ops._bucketize(  # pylint: disable=protected-access
        source_tensor,
        boundaries=self.boundaries)
  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    # One-hot depth is len(boundaries) + 1: n boundaries split the real line
    # into n + 1 buckets.
    return tensor_shape.TensorShape(
        tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _variable_shape(self):
    return self.variable_shape
  def _get_dense_tensor_for_input_tensor(self, input_tensor):
    # One-hot encodes the bucket indices; shared by the v1 and v2 paths.
    return array_ops.one_hot(
        indices=math_ops.cast(input_tensor, dtypes.int64),
        depth=len(self.boundaries) + 1,
        on_value=1.,
        off_value=0.)
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns one hot encoded dense `Tensor`."""
    input_tensor = transformation_cache.get(self, state_manager)
    return self._get_dense_tensor_for_input_tensor(input_tensor)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # No variables are created here; arguments kept for interface parity.
    del weight_collections
    del trainable
    input_tensor = inputs.get(self)
    return self._get_dense_tensor_for_input_tensor(input_tensor)
  @property
  def num_buckets(self):
    """See `CategoricalColumn` base class."""
    # By construction, source_column is always one-dimensional.
    return (len(self.boundaries) + 1) * self.source_column.shape[0]
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def _get_sparse_tensors_for_input_tensor(self, input_tensor):
    # Builds a dense-shaped SparseTensor of bucket ids, one id per
    # (example, source-dimension) cell.
    batch_size = array_ops.shape(input_tensor)[0]
    # By construction, source_column is always one-dimensional.
    source_dimension = self.source_column.shape[0]
    # i1: batch-row index for every (example, dimension) pair:
    # [0,...,0, 1,...,1, ...] with each row repeated source_dimension times.
    i1 = array_ops.reshape(
        array_ops.tile(
            array_ops.expand_dims(math_ops.range(0, batch_size), 1),
            [1, source_dimension]),
        (-1,))
    # i2: dimension index for every pair: [0,...,d-1, 0,...,d-1, ...].
    i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
    # Flatten the bucket indices and unique them across dimensions
    # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
    bucket_indices = (
        array_ops.reshape(input_tensor, (-1,)) +
        (len(self.boundaries) + 1) * i2)
    indices = math_ops.cast(
        array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
    dense_shape = math_ops.cast(
        array_ops.stack([batch_size, source_dimension]), dtypes.int64)
    sparse_tensor = sparse_tensor_lib.SparseTensor(
        indices=indices,
        values=bucket_indices,
        dense_shape=dense_shape)
    # Bucket ids carry implicit weight 1, so weight_tensor is None.
    return CategoricalColumn.IdWeightPair(sparse_tensor, None)
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """Converts dense inputs to SparseTensor so downstream code can use it."""
    input_tensor = transformation_cache.get(self, state_manager)
    return self._get_sparse_tensors_for_input_tensor(input_tensor)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    """Converts dense inputs to SparseTensor so downstream code can use it."""
    del weight_collections
    del trainable
    input_tensor = inputs.get(self)
    return self._get_sparse_tensors_for_input_tensor(input_tensor)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.source_column]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    # Imported here to avoid a circular dependency with the serialization
    # module.
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['source_column'] = serialize_feature_column(self.source_column)
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['source_column'] = deserialize_feature_column(
        config['source_column'], custom_objects, columns_by_name)
    return cls(**kwargs)
class EmbeddingColumn(
    DenseColumn,
    SequenceDenseColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    fc_old._SequenceDenseColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'EmbeddingColumn',
        ('categorical_column', 'dimension', 'combiner', 'initializer',
         'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable',
         'use_safe_embedding_lookup'))):
  """See `embedding_column`."""
  def __new__(cls,
              categorical_column,
              dimension,
              combiner,
              initializer,
              ckpt_to_load_from,
              tensor_name_in_ckpt,
              max_norm,
              trainable,
              use_safe_embedding_lookup=True):
    # `__new__` is overridden (rather than relying on the plain namedtuple
    # constructor) so that `use_safe_embedding_lookup` has a default value,
    # keeping older call sites that omit it working.
    return super(EmbeddingColumn, cls).__new__(
        cls,
        categorical_column=categorical_column,
        dimension=dimension,
        combiner=combiner,
        initializer=initializer,
        ckpt_to_load_from=ckpt_to_load_from,
        tensor_name_in_ckpt=tensor_name_in_ckpt,
        max_norm=max_norm,
        trainable=trainable,
        use_safe_embedding_lookup=use_safe_embedding_lookup)
  @property
  def _is_v2_column(self):
    # v2 only if the wrapped categorical column is itself a v2 column.
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column)  # pylint: disable=protected-access
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_embedding'.format(self.categorical_column.name)
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    # Parsing is delegated entirely to the wrapped categorical column.
    return self.categorical_column.parse_example_spec
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.categorical_column._parse_example_spec  # pylint: disable=protected-access
  def transform_feature(self, transformation_cache, state_manager):
    """Transforms underlying `categorical_column`."""
    return transformation_cache.get(self.categorical_column, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    return inputs.get(self.categorical_column)
  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    return tensor_shape.TensorShape([self.dimension])
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _variable_shape(self):
    return self.variable_shape
  def create_state(self, state_manager):
    """Creates the embedding lookup variable."""
    # Prefer the public v2 `num_buckets`; fall back to the private v1
    # attribute for old-style categorical columns.
    default_num_buckets = (self.categorical_column.num_buckets
                           if self._is_v2_column
                           else self.categorical_column._num_buckets)   # pylint: disable=protected-access
    num_buckets = getattr(self.categorical_column, 'num_buckets',
                          default_num_buckets)
    embedding_shape = (num_buckets, self.dimension)
    state_manager.create_variable(
        self,
        name='embedding_weights',
        shape=embedding_shape,
        dtype=dtypes.float32,
        trainable=self.trainable,
        use_resource=True,
        initializer=self.initializer)
  def _get_dense_tensor_internal_helper(self, sparse_tensors,
                                        embedding_weights):
    # Shared tail of the v1 and v2 dense-tensor paths: optional checkpoint
    # warm-start, then the sparse embedding lookup.
    sparse_ids = sparse_tensors.id_tensor
    sparse_weights = sparse_tensors.weight_tensor
    if self.ckpt_to_load_from is not None:
      to_restore = embedding_weights
      if isinstance(to_restore, variables.PartitionedVariable):
        to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
      checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
          self.tensor_name_in_ckpt: to_restore
      })
    sparse_id_rank = tensor_shape.dimension_value(
        sparse_ids.dense_shape.get_shape()[0])
    embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse
    # The plain (non-safe) lookup is used only when explicitly requested and
    # the id rank is statically known to be at most 2.
    if (not self.use_safe_embedding_lookup and sparse_id_rank is not None and
        sparse_id_rank <= 2):
      embedding_lookup_sparse = embedding_ops.embedding_lookup_sparse
    # Return embedding lookup result.
    return embedding_lookup_sparse(
        embedding_weights,
        sparse_ids,
        sparse_weights,
        combiner=self.combiner,
        name='%s_weights' % self.name,
        max_norm=self.max_norm)
  def _get_dense_tensor_internal(self, sparse_tensors, state_manager):
    """Private method that follows the signature of get_dense_tensor."""
    embedding_weights = state_manager.get_variable(
        self, name='embedding_weights')
    return self._get_dense_tensor_internal_helper(sparse_tensors,
                                                  embedding_weights)
  def _old_get_dense_tensor_internal(self, sparse_tensors, weight_collections,
                                     trainable):
    """Private method that follows the signature of _get_dense_tensor."""
    embedding_shape = (self.categorical_column._num_buckets, self.dimension)  # pylint: disable=protected-access
    # Ensure the variable also lands in GLOBAL_VARIABLES so it is saved.
    if (weight_collections and
        ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
      weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
    embedding_weights = variable_scope.get_variable(
        name='embedding_weights',
        shape=embedding_shape,
        dtype=dtypes.float32,
        initializer=self.initializer,
        trainable=self.trainable and trainable,
        collections=weight_collections)
    return self._get_dense_tensor_internal_helper(sparse_tensors,
                                                  embedding_weights)
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns tensor after doing the embedding lookup.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      Embedding lookup tensor.
    Raises:
      ValueError: `categorical_column` is SequenceCategoricalColumn.
    """
    if isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must not be of type SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Get sparse IDs and weights.
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    return self._get_dense_tensor_internal(sparse_tensors, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # v1 path: same sequence-column guard, but also accepts the old private
    # _SequenceCategoricalColumn type.
    if isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must not be of type _SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    sparse_tensors = self.categorical_column._get_sparse_tensors(  # pylint: disable=protected-access
        inputs, weight_collections, trainable)
    return self._old_get_dense_tensor_internal(sparse_tensors,
                                               weight_collections, trainable)
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """See `SequenceDenseColumn` base class."""
    if not isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    dense_tensor = self._get_dense_tensor_internal(sparse_tensors,
                                                   state_manager)
    # Per-example true sequence lengths, recovered from the sparse ids.
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sequence_dense_tensor(self,
                                 inputs,
                                 weight_collections=None,
                                 trainable=None):
    if not isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # NOTE(review): unlike `_get_dense_tensor`, weight_collections/trainable
    # are not forwarded to `_get_sparse_tensors` here — only to the embedding
    # variable creation below. Confirm this asymmetry is intended.
    sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    dense_tensor = self._old_get_dense_tensor_internal(
        sparse_tensors,
        weight_collections=weight_collections,
        trainable=trainable)
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    # Imported here to avoid a circular dependency with the serialization
    # module.
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    config['initializer'] = initializers.serialize(self.initializer)
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    # Older serialized configs predate `use_safe_embedding_lookup`; default
    # the missing key for backward compatibility.
    if 'use_safe_embedding_lookup' not in config:
      config['use_safe_embedding_lookup'] = True
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    kwargs['initializer'] = initializers.deserialize(
        config['initializer'], custom_objects=custom_objects)
    return cls(**kwargs)
def _raise_shared_embedding_column_error():
raise ValueError('SharedEmbeddingColumns are not supported in '
'`linear_model` or `input_layer`. Please use '
'`DenseFeatures` or `LinearModel` instead.')
class SharedEmbeddingColumnCreator(tracking.AutoTrackable):
  """Builds `SharedEmbeddingColumn`s that share a single embedding table.

  Each call to the creator produces a `SharedEmbeddingColumn` wrapping a
  different categorical column, but all of them look up into the same
  lazily-created `embedding_weights` variable.
  """
  def __init__(self,
               dimension,
               initializer,
               ckpt_to_load_from,
               tensor_name_in_ckpt,
               num_buckets,
               trainable,
               name='shared_embedding_column_creator',
               use_safe_embedding_lookup=True):
    """Stores the shared embedding configuration.

    Args:
      dimension: Width of the shared embedding (output size per id).
      initializer: Initializer for the embedding variable.
      ckpt_to_load_from: Optional checkpoint path to warm-start from.
      tensor_name_in_ckpt: Name of the tensor in that checkpoint.
      num_buckets: Number of rows in the shared embedding table.
      trainable: Whether the embedding variable is trainable.
      name: Variable name for the shared embedding weights.
      use_safe_embedding_lookup: Whether columns created here use the safe
        (pruning/validating) sparse embedding lookup.
    """
    self._dimension = dimension
    self._initializer = initializer
    self._ckpt_to_load_from = ckpt_to_load_from
    self._tensor_name_in_ckpt = tensor_name_in_ckpt
    self._num_buckets = num_buckets
    self._trainable = trainable
    self._name = name
    self._use_safe_embedding_lookup = use_safe_embedding_lookup
    # Map from graph keys to embedding_weight variables.
    self._embedding_weights = {}
  def __call__(self, categorical_column, combiner, max_norm):
    # Each produced column shares this creator (and thus its weights).
    return SharedEmbeddingColumn(categorical_column, self, combiner, max_norm,
                                 self._use_safe_embedding_lookup)
  @property
  def embedding_weights(self):
    # One variable per graph, keyed by the graph's private key, so the same
    # creator can be reused across graphs without variable collisions.
    key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    if key not in self._embedding_weights:
      embedding_shape = (self._num_buckets, self._dimension)
      var = variable_scope.get_variable(
          name=self._name,
          shape=embedding_shape,
          dtype=dtypes.float32,
          initializer=self._initializer,
          trainable=self._trainable)
      # Optionally warm-start the freshly created variable from a checkpoint.
      if self._ckpt_to_load_from is not None:
        to_restore = var
        if isinstance(to_restore, variables.PartitionedVariable):
          to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
        checkpoint_utils.init_from_checkpoint(
            self._ckpt_to_load_from, {self._tensor_name_in_ckpt: to_restore})
      self._embedding_weights[key] = var
    return self._embedding_weights[key]
  @property
  def dimension(self):
    # Width of the shared embedding; read by the columns this creator builds.
    return self._dimension
class SharedEmbeddingColumn(
    DenseColumn,
    SequenceDenseColumn,
    fc_old._DenseColumn, # pylint: disable=protected-access
    fc_old._SequenceDenseColumn, # pylint: disable=protected-access
    collections.namedtuple(
        'SharedEmbeddingColumn',
        ('categorical_column', 'shared_embedding_column_creator', 'combiner',
         'max_norm', 'use_safe_embedding_lookup'))):
  """See `embedding_column`.

  A dense column whose embedding table is obtained from
  `shared_embedding_column_creator`, so several columns can share one
  embedding variable. The v1 (underscore-prefixed) accessors are not
  supported and raise via `_raise_shared_embedding_column_error`.
  """
  def __new__(cls,
              categorical_column,
              shared_embedding_column_creator,
              combiner,
              max_norm,
              use_safe_embedding_lookup=True):
    # __new__ (not __init__) because namedtuple instances are immutable.
    return super(SharedEmbeddingColumn, cls).__new__(
        cls,
        categorical_column=categorical_column,
        shared_embedding_column_creator=shared_embedding_column_creator,
        combiner=combiner,
        max_norm=max_norm,
        use_safe_embedding_lookup=use_safe_embedding_lookup)
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_shared_embedding'.format(self.categorical_column.name)
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return self.categorical_column.parse_example_spec
  @property
  def _parse_example_spec(self):
    # The v1 feature-column API is not supported for shared embeddings.
    return _raise_shared_embedding_column_error()
  def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class."""
    return transformation_cache.get(self.categorical_column, state_manager)
  def _transform_feature(self, inputs):
    return _raise_shared_embedding_column_error()
  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    return tensor_shape.TensorShape(
        [self.shared_embedding_column_creator.dimension])
  @property
  def _variable_shape(self):
    return _raise_shared_embedding_column_error()
  def _get_dense_tensor_internal(self, transformation_cache, state_manager):
    """Private method that follows the signature of _get_dense_tensor."""
    # This method is called from a variable_scope with name _var_scope_name,
    # which is shared among all shared embeddings. Open a name_scope here, so
    # that the ops for different columns have distinct names.
    with ops.name_scope(None, default_name=self.name):
      # Get sparse IDs and weights.
      sparse_tensors = self.categorical_column.get_sparse_tensors(
          transformation_cache, state_manager)
      sparse_ids = sparse_tensors.id_tensor
      sparse_weights = sparse_tensors.weight_tensor
      embedding_weights = self.shared_embedding_column_creator.embedding_weights
      sparse_id_rank = tensor_shape.dimension_value(
          sparse_ids.dense_shape.get_shape()[0])
      embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse
      # Use the plain (non-safe) lookup only when explicitly requested AND
      # the ids' rank is statically known to be at most 2.
      if (not self.use_safe_embedding_lookup and sparse_id_rank is not None and
          sparse_id_rank <= 2):
        embedding_lookup_sparse = (embedding_ops.embedding_lookup_sparse)
      # Return embedding lookup result.
      return embedding_lookup_sparse(
          embedding_weights,
          sparse_ids,
          sparse_weights,
          combiner=self.combiner,
          name='%s_weights' % self.name,
          max_norm=self.max_norm)
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns the embedding lookup result."""
    if isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must not be of type SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    return self._get_dense_tensor_internal(transformation_cache, state_manager)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    return _raise_shared_embedding_column_error()
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """See `SequenceDenseColumn` base class."""
    if not isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    dense_tensor = self._get_dense_tensor_internal(transformation_cache,
                                                   state_manager)
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    # Sequence length is derived from the sparse ids, not the dense result.
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  def _get_sequence_dense_tensor(self,
                                 inputs,
                                 weight_collections=None,
                                 trainable=None):
    return _raise_shared_embedding_column_error()
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column]
def _check_shape(shape, key):
  """Validates `shape` and returns it normalized to a tuple of ints.

  Args:
    shape: A single int or a sequence of ints describing dimensions.
    key: The feature key, used only for error messages.

  Returns:
    `shape` as a tuple of ints.

  Raises:
    TypeError: If any dimension is not an integer.
    ValueError: If any dimension is not strictly positive.
  """
  assert shape is not None
  normalized = tuple(shape) if nest.is_sequence(shape) else (shape,)
  for dim in normalized:
    if not isinstance(dim, int):
      raise TypeError('shape dimensions must be integer. '
                      'shape: {}, key: {}'.format(normalized, key))
    if dim < 1:
      raise ValueError('shape dimensions must be greater than 0. '
                       'shape: {}, key: {}'.format(normalized, key))
  return normalized
class HashedCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn, # pylint: disable=protected-access
    collections.namedtuple('HashedCategoricalColumn',
                           ('key', 'hash_bucket_size', 'dtype'))):
  """see `categorical_column_with_hash_bucket`.

  Maps sparse string/int input values into `hash_bucket_size` buckets by
  hashing the (stringified) values.
  """
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor):
    """Hashes the values in the feature_column."""
    if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError('SparseColumn input must be a SparseTensor.')
    fc_utils.assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))
    # Integer inputs are stringified first so the same string hash applies.
    if self.dtype == dtypes.string:
      sparse_values = input_tensor.values
    else:
      sparse_values = string_ops.as_string(input_tensor.values)
    sparse_id_values = string_ops.string_to_hash_bucket_fast(
        sparse_values, self.hash_bucket_size, name='lookup')
    # Only values change; indices and dense_shape are preserved.
    return sparse_tensor_lib.SparseTensor(
        input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
  def transform_feature(self, transformation_cache, state_manager):
    """Hashes the values in the feature_column."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.hash_bucket_size
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # Serialize the DType by its string name; from_config reverses this.
    config['dtype'] = self.dtype.name
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class VocabularyFileCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn, # pylint: disable=protected-access
    collections.namedtuple('VocabularyFileCategoricalColumn',
                           ('key', 'vocabulary_file', 'vocabulary_size',
                            'num_oov_buckets', 'dtype', 'default_value'))):
  """See `categorical_column_with_vocabulary_file`.

  Maps input values to integer ids via a lookup table built from a
  vocabulary file, with optional out-of-vocabulary buckets.
  """
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor, state_manager=None):
    """Creates a lookup table for the vocabulary."""
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))
    fc_utils.assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_file` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.cast(input_tensor, dtypes.int64)
    name = '{}_lookup'.format(self.key)
    # The table is cached in the state manager (when one is provided) so it
    # is built at most once per column.
    if state_manager is None or not state_manager.has_resource(self, name):
      # NOTE(review): init_scope presumably lifts table creation out of any
      # function-building context so it is created once — confirm.
      with ops.init_scope():
        table = lookup_ops.index_table_from_file(
            vocabulary_file=self.vocabulary_file,
            num_oov_buckets=self.num_oov_buckets,
            vocab_size=self.vocabulary_size,
            default_value=self.default_value,
            key_dtype=key_dtype,
            name=name)
      if state_manager is not None:
        state_manager.add_resource(self, name, table)
    else:
      # Reuse the table from the previous run.
      table = state_manager.get_resource(self, name)
    return table.lookup(input_tensor)
  def transform_feature(self, transformation_cache, state_manager):
    """Creates a lookup table for the vocabulary."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    # In-vocabulary ids plus the out-of-vocabulary buckets.
    return self.vocabulary_size + self.num_oov_buckets
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # Serialize the DType by its string name; from_config reverses this.
    config['dtype'] = self.dtype.name
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class VocabularyListCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn, # pylint: disable=protected-access
    collections.namedtuple(
        'VocabularyListCategoricalColumn',
        ('key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'))
):
  """See `categorical_column_with_vocabulary_list`.

  Like `VocabularyFileCategoricalColumn`, but the vocabulary is given as an
  in-memory list instead of a file.
  """
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor, state_manager=None):
    """Creates a lookup table for the vocabulary list."""
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))
    fc_utils.assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_tensor` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.cast(input_tensor, dtypes.int64)
    name = '{}_lookup'.format(self.key)
    # The table is cached in the state manager (when one is provided) so it
    # is built at most once per column.
    if state_manager is None or not state_manager.has_resource(self, name):
      with ops.init_scope():
        table = lookup_ops.index_table_from_tensor(
            vocabulary_list=tuple(self.vocabulary_list),
            default_value=self.default_value,
            num_oov_buckets=self.num_oov_buckets,
            dtype=key_dtype,
            name=name)
      if state_manager is not None:
        state_manager.add_resource(self, name, table)
    else:
      # Reuse the table from the previous run.
      table = state_manager.get_resource(self, name)
    return table.lookup(input_tensor)
  def transform_feature(self, transformation_cache, state_manager):
    """Creates a lookup table for the vocabulary list."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    # In-vocabulary ids plus the out-of-vocabulary buckets.
    return len(self.vocabulary_list) + self.num_oov_buckets
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # Serialize the DType by its string name; from_config reverses this.
    config['dtype'] = self.dtype.name
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class IdentityCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn, # pylint: disable=protected-access
    collections.namedtuple('IdentityCategoricalColumn',
                           ('key', 'number_buckets', 'default_value'))):
  """See `categorical_column_with_identity`.

  Uses the integer input values themselves as bucket ids, optionally
  replacing out-of-range values with `default_value`.
  """
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor):
    """Returns a SparseTensor with identity values."""
    if not input_tensor.dtype.is_integer:
      raise ValueError(
          'Invalid input, not integer. key: {} dtype: {}'.format(
              self.key, input_tensor.dtype))
    values = input_tensor.values
    if input_tensor.values.dtype != dtypes.int64:
      values = math_ops.cast(values, dtypes.int64, name='values')
    if self.default_value is not None:
      # NOTE(review): this re-cast from the original values duplicates the
      # cast above; it looks redundant but the result is the same.
      values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')
      num_buckets = math_ops.cast(
          self.num_buckets, dtypes.int64, name='num_buckets')
      zero = math_ops.cast(0, dtypes.int64, name='zero')
      # Assign default for out-of-range values.
      values = array_ops.where_v2(
          math_ops.logical_or(
              values < zero, values >= num_buckets, name='out_of_range'),
          array_ops.fill(
              dims=array_ops.shape(values),
              value=math_ops.cast(self.default_value, dtypes.int64),
              name='default_values'), values)
    # Only values change; indices and dense_shape are preserved.
    return sparse_tensor_lib.SparseTensor(
        indices=input_tensor.indices,
        values=values,
        dense_shape=input_tensor.dense_shape)
  def transform_feature(self, transformation_cache, state_manager):
    """Returns a SparseTensor with identity values."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.number_buckets
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    # All fields are plain serializable values; no dtype conversion needed.
    return dict(zip(self._fields, self))
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    return cls(**kwargs)
class WeightedCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn, # pylint: disable=protected-access
    collections.namedtuple(
        'WeightedCategoricalColumn',
        ('categorical_column', 'weight_feature_key', 'dtype'))):
  """See `weighted_categorical_column`.

  Pairs the ids from `categorical_column` with per-id weights read from the
  feature keyed by `weight_feature_key`.
  """
  @property
  def _is_v2_column(self):
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column) # pylint: disable=protected-access
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_weighted_by_{}'.format(
        self.categorical_column.name, self.weight_feature_key)
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    config = self.categorical_column.parse_example_spec
    # The weight feature must not collide with the wrapped column's features.
    if self.weight_feature_key in config:
      raise ValueError('Parse config {} already exists for {}.'.format(
          config[self.weight_feature_key], self.weight_feature_key))
    config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
    return config
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
    if self.weight_feature_key in config:
      raise ValueError('Parse config {} already exists for {}.'.format(
          config[self.weight_feature_key], self.weight_feature_key))
    config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
    return config
  @property
  def num_buckets(self):
    """See `DenseColumn` base class."""
    return self.categorical_column.num_buckets
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.categorical_column._num_buckets # pylint: disable=protected-access
  def _transform_weight_tensor(self, weight_tensor):
    """Validates the weight tensor and converts it to a float SparseTensor."""
    if weight_tensor is None:
      raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
    weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        weight_tensor)
    if self.dtype != weight_tensor.dtype.base_dtype:
      raise ValueError('Bad dtype, expected {}, but got {}.'.format(
          self.dtype, weight_tensor.dtype))
    if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
      # The weight tensor can be a regular Tensor. In this case, sparsify it.
      weight_tensor = _to_sparse_input_and_drop_ignore_values(
          weight_tensor, ignore_value=0.0)
    if not weight_tensor.dtype.is_floating:
      weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
    return weight_tensor
  def transform_feature(self, transformation_cache, state_manager):
    """Applies weights to tensor generated from `categorical_column`'."""
    weight_tensor = transformation_cache.get(self.weight_feature_key,
                                             state_manager)
    sparse_weight_tensor = self._transform_weight_tensor(weight_tensor)
    sparse_categorical_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.categorical_column, state_manager))
    # Returned as an (ids, weights) pair; see get_sparse_tensors.
    return (sparse_categorical_tensor, sparse_weight_tensor)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Applies weights to tensor generated from `categorical_column`'."""
    weight_tensor = inputs.get(self.weight_feature_key)
    weight_tensor = self._transform_weight_tensor(weight_tensor)
    return (inputs.get(self.categorical_column), weight_tensor)
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    tensors = transformation_cache.get(self, state_manager)
    return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    tensors = inputs.get(self)
    return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column, self.weight_feature_key]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    # Serialize the DType by its string name; from_config reverses this.
    config['dtype'] = self.dtype.name
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class CrossedColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn, # pylint: disable=protected-access
    collections.namedtuple('CrossedColumn',
                           ('keys', 'hash_bucket_size', 'hash_key'))):
  """See `crossed_column`.

  Crosses the sparse values of all leaf-level keys (nested crosses are
  expanded first) and hashes the result into `hash_bucket_size` buckets.
  """
  @property
  def _is_v2_column(self):
    # v2 only when every non-string leaf key is a v2 FeatureColumn.
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        continue
      if not isinstance(key, FeatureColumn):
        return False
      if not key._is_v2_column: # pylint: disable=protected-access
        return False
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    feature_names = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, (FeatureColumn, fc_old._FeatureColumn)): # pylint: disable=protected-access
        feature_names.append(key.name)
      else: # key must be a string
        feature_names.append(key)
    # Sorted so the name is independent of key ordering.
    return '_X_'.join(sorted(feature_names))
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    config = {}
    for key in self.keys:
      if isinstance(key, FeatureColumn):
        config.update(key.parse_example_spec)
      elif isinstance(key, fc_old._FeatureColumn): # pylint: disable=protected-access
        config.update(key._parse_example_spec) # pylint: disable=protected-access
      else: # key must be a string
        config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
    return config
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def transform_feature(self, transformation_cache, state_manager):
    """Generates a hashed sparse cross from the input tensors."""
    feature_tensors = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        feature_tensors.append(transformation_cache.get(key, state_manager))
      elif isinstance(key, (fc_old._CategoricalColumn, CategoricalColumn)): # pylint: disable=protected-access
        ids_and_weights = key.get_sparse_tensors(transformation_cache,
                                                 state_manager)
        # Weighted columns cannot be crossed; only the ids are usable.
        if ids_and_weights.weight_tensor is not None:
          raise ValueError(
              'crossed_column does not support weight_tensor, but the given '
              'column populates weight_tensor. '
              'Given column: {}'.format(key.name))
        feature_tensors.append(ids_and_weights.id_tensor)
      else:
        raise ValueError('Unsupported column type. Given: {}'.format(key))
    return sparse_ops.sparse_cross_hashed(
        inputs=feature_tensors,
        num_buckets=self.hash_bucket_size,
        hash_key=self.hash_key)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Generates a hashed sparse cross from the input tensors."""
    feature_tensors = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        feature_tensors.append(inputs.get(key))
      elif isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn)): # pylint: disable=protected-access
        ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access
        if ids_and_weights.weight_tensor is not None:
          raise ValueError(
              'crossed_column does not support weight_tensor, but the given '
              'column populates weight_tensor. '
              'Given column: {}'.format(key.name))
        feature_tensors.append(ids_and_weights.id_tensor)
      else:
        raise ValueError('Unsupported column type. Given: {}'.format(key))
    return sparse_ops.sparse_cross_hashed(
        inputs=feature_tensors,
        num_buckets=self.hash_bucket_size,
        hash_key=self.hash_key)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.hash_bucket_size
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    """See `CategoricalColumn` base class."""
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    # Direct (possibly nested) keys, not the expanded leaf-level keys.
    return list(self.keys)
  def get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['keys'] = tuple([serialize_feature_column(fc) for fc in self.keys])
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['keys'] = tuple([
        deserialize_feature_column(c, custom_objects, columns_by_name)
        for c in config['keys']
    ])
    return cls(**kwargs)
def _collect_leaf_level_keys(cross):
  """Collects base keys by expanding all nested crosses.

  Args:
    cross: A `CrossedColumn`.

  Returns:
    A list of strings or `CategoricalColumn` instances, in depth-first order.
  """
  leaves = []
  for key in cross.keys:
    # A nested cross contributes all of its own leaves; anything else is
    # already a leaf.
    nested = isinstance(key, CrossedColumn)
    leaves.extend(_collect_leaf_level_keys(key) if nested else [key])
  return leaves
def _prune_invalid_ids(sparse_ids, sparse_weights):
  """Prunes entries with negative ids from the ids and matching weights.

  Args:
    sparse_ids: A `SparseTensor` of ids; entries with value < 0 are dropped.
    sparse_weights: An optional `SparseTensor` of weights, pruned with the
      same mask so it stays aligned with `sparse_ids`.

  Returns:
    A `(sparse_ids, sparse_weights)` pair with invalid entries removed.
  """
  keep_mask = math_ops.greater_equal(sparse_ids.values, 0)
  if sparse_weights is not None:
    # AND with an all-True mask shaped like the weights; presumably this is
    # to force an op-level shape check between ids and weights — confirm.
    keep_mask = math_ops.logical_and(
        keep_mask,
        array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
  pruned_ids = sparse_ops.sparse_retain(sparse_ids, keep_mask)
  pruned_weights = sparse_weights
  if sparse_weights is not None:
    pruned_weights = sparse_ops.sparse_retain(sparse_weights, keep_mask)
  return pruned_ids, pruned_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
  """Prunes entries whose weight is not strictly positive.

  Note that entries with weight exactly 0 are also pruned (only weights > 0
  are kept).

  Args:
    sparse_ids: A `SparseTensor` of ids.
    sparse_weights: An optional `SparseTensor` of weights aligned with
      `sparse_ids`; when `None`, both inputs are returned unchanged.

  Returns:
    A `(sparse_ids, sparse_weights)` pair with non-positive-weight entries
    removed.
  """
  if sparse_weights is None:
    return sparse_ids, sparse_weights
  keep_mask = math_ops.greater(sparse_weights.values, 0)
  return (sparse_ops.sparse_retain(sparse_ids, keep_mask),
          sparse_ops.sparse_retain(sparse_weights, keep_mask))
class IndicatorColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple('IndicatorColumn', ('categorical_column'))):
"""Represents a one-hot column for use in deep networks.
Args:
categorical_column: A `CategoricalColumn` which is created by
`categorical_column_with_*` function.
"""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_indicator'.format(self.categorical_column.name)
def _transform_id_weight_pair(self, id_weight_pair, size):
id_tensor = id_weight_pair.id_tensor
weight_tensor = id_weight_pair.weight_tensor
# If the underlying column is weighted, return the input as a dense tensor.
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=int(size))
# Remove (?, -1) index.
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
# Use scatter_nd to merge duplicated indices if existed,
# instead of sparse_tensor_to_dense.
return array_ops.scatter_nd(weighted_column.indices,
weighted_column.values,
weighted_column.dense_shape)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor, depth=size, on_value=1.0, off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
def transform_feature(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Transformed feature `Tensor`.
Raises:
ValueError: if input rank is not known at graph building time.
"""
id_weight_pair = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._transform_id_weight_pair(id_weight_pair,
self.variable_shape[-1])
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                        _FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
  """Deprecated V1 counterpart of `transform_feature`."""
  pair = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
  num_buckets = self._variable_shape[-1]
  return self._transform_id_weight_pair(pair, num_buckets)
@property
def parse_example_spec(self):
  """See `FeatureColumn` base class."""
  # Parsing is entirely delegated to the wrapped categorical column.
  spec = self.categorical_column.parse_example_spec
  return spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                        _FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
  """Deprecated V1 counterpart of `parse_example_spec`."""
  spec = self.categorical_column._parse_example_spec  # pylint: disable=protected-access
  return spec
@property
def variable_shape(self):
  """Returns a `TensorShape` representing the shape of the dense `Tensor`."""
  # V2 columns expose `num_buckets` publicly; V1 columns only have the
  # protected `_num_buckets`.
  if isinstance(self.categorical_column, FeatureColumn):
    bucket_count = self.categorical_column.num_buckets
  else:
    bucket_count = self.categorical_column._num_buckets  # pylint: disable=protected-access
  return tensor_shape.TensorShape([1, bucket_count])
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                        _FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
  """Deprecated V1 counterpart of `variable_shape`."""
  bucket_count = self.categorical_column._num_buckets  # pylint: disable=protected-access
  return tensor_shape.TensorShape([1, bucket_count])
def get_dense_tensor(self, transformation_cache, state_manager):
  """Returns dense `Tensor` representing feature.

  Args:
    transformation_cache: A `FeatureTransformationCache` object to access
      features.
    state_manager: A `StateManager` to create / access resources such as
      lookup tables.

  Returns:
    Dense `Tensor` created within `transform_feature`.

  Raises:
    ValueError: If `categorical_column` is a `SequenceCategoricalColumn`.
  """
  column = self.categorical_column
  if isinstance(column, SequenceCategoricalColumn):
    raise ValueError(
        'In indicator_column: {}. '
        'categorical_column must not be of type SequenceCategoricalColumn. '
        'Suggested fix A: If you wish to use DenseFeatures, use a '
        'non-sequence categorical_column_with_*. '
        'Suggested fix B: If you wish to create sequence input, use '
        'SequenceFeatures instead of DenseFeatures. '
        'Given (type {}): {}'.format(self.name, type(column), column))
  # The dense representation was already built by `transform_feature`;
  # simply fetch it from the cache.
  return transformation_cache.get(self, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                        _FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
  """Deprecated V1 counterpart of `get_dense_tensor`."""
  # No variables are created here, so both arguments are irrelevant.
  del weight_collections, trainable
  column = self.categorical_column
  if isinstance(
      column,
      (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
    raise ValueError(
        'In indicator_column: {}. '
        'categorical_column must not be of type _SequenceCategoricalColumn. '
        'Suggested fix A: If you wish to use DenseFeatures, use a '
        'non-sequence categorical_column_with_*. '
        'Suggested fix B: If you wish to create sequence input, use '
        'SequenceFeatures instead of DenseFeatures. '
        'Given (type {}): {}'.format(self.name, type(column), column))
  # The feature was already transformed; fetch the cached representation.
  return inputs.get(self)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
  """See `SequenceDenseColumn` base class."""
  column = self.categorical_column
  if not isinstance(column, SequenceCategoricalColumn):
    raise ValueError(
        'In indicator_column: {}. '
        'categorical_column must be of type SequenceCategoricalColumn '
        'to use SequenceFeatures. '
        'Suggested fix: Use one of sequence_categorical_column_with_*. '
        'Given (type {}): {}'.format(self.name, type(column), column))
  # The multi-hot representation was already built by `transform_feature`.
  multi_hot = transformation_cache.get(self, state_manager)
  id_weight_pair = column.get_sparse_tensors(transformation_cache,
                                             state_manager)
  seq_length = fc_utils.sequence_length_from_sparse_tensor(
      id_weight_pair.id_tensor)
  return SequenceDenseColumn.TensorSequenceLengthPair(
      dense_tensor=multi_hot, sequence_length=seq_length)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                        _FEATURE_COLUMN_DEPRECATION)
def _get_sequence_dense_tensor(self,
                               inputs,
                               weight_collections=None,
                               trainable=None):
  """Deprecated V1 counterpart of `get_sequence_dense_tensor`."""
  # No variables are created in this function, so both arguments are unused.
  del weight_collections, trainable
  column = self.categorical_column
  if not isinstance(
      column,
      (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
    raise ValueError(
        'In indicator_column: {}. '
        'categorical_column must be of type _SequenceCategoricalColumn '
        'to use SequenceFeatures. '
        'Suggested fix: Use one of sequence_categorical_column_with_*. '
        'Given (type {}): {}'.format(self.name, type(column), column))
  # The multi-hot representation was already built by `_transform_feature`.
  multi_hot = inputs.get(self)
  id_weight_pair = column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
  seq_length = fc_utils.sequence_length_from_sparse_tensor(
      id_weight_pair.id_tensor)
  return SequenceDenseColumn.TensorSequenceLengthPair(
      dense_tensor=multi_hot, sequence_length=seq_length)
@property
def parents(self):
  """See 'FeatureColumn` base class."""
  column = self.categorical_column
  return [column]
def get_config(self):
  """See 'FeatureColumn` base class."""
  from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
  # Map every namedtuple field to its value, then replace the nested column
  # with its serialized form.
  config = {field: value for field, value in zip(self._fields, self)}
  config['categorical_column'] = serialize_feature_column(
      self.categorical_column)
  return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
  """See 'FeatureColumn` base class."""
  from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
  _check_config_keys(config, cls._fields)
  kwargs = _standardize_and_copy_config(config)
  # Revive the nested categorical column before reconstructing this column.
  revived_column = deserialize_feature_column(
      config['categorical_column'], custom_objects, columns_by_name)
  kwargs['categorical_column'] = revived_column
  return cls(**kwargs)
def _verify_static_batch_size_equality(tensors, columns):
  """Verify equality between static batch sizes.

  Args:
    tensors: sequence of input tensors.
    columns: Corresponding feature columns.

  Raises:
    ValueError: in case of mismatched batch sizes.
  """
  # expected_batch_size is a Dimension object taken from the first tensor
  # whose batch size is statically known; tensors with an unknown (None)
  # batch size are skipped.
  expected_batch_size = None
  batch_size_column_index = None
  # Fixed: 'bath_size' typo in the index variable and comment; use
  # enumerate() instead of range(len(...)).
  for i, tensor in enumerate(tensors):
    batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(
        tensor.shape[0]))
    if batch_size.value is not None:
      if expected_batch_size is None:
        batch_size_column_index = i
        expected_batch_size = batch_size
      elif not expected_batch_size.is_compatible_with(batch_size):
        raise ValueError(
            'Batch size (first dimension) of each feature must be same. '
            'Batch size of columns ({}, {}): ({}, {})'.format(
                columns[batch_size_column_index].name, columns[i].name,
                expected_batch_size, batch_size))
class SequenceCategoricalColumn(
    CategoricalColumn,
    fc_old._SequenceCategoricalColumn,  # pylint: disable=protected-access
    # NOTE(review): ('categorical_column') below is a plain parenthesized
    # string, not a 1-tuple. namedtuple accepts a single field-name string,
    # so behavior is correct, but ('categorical_column',) would be clearer.
    collections.namedtuple('SequenceCategoricalColumn',
                           ('categorical_column'))):
  """Represents sequences of categorical data.

  A thin wrapper that delegates everything to the wrapped categorical
  column, except that sparse id/weight tensors are reshaped to rank 3
  (see `_get_sparse_tensors_helper`) so that sequence steps are kept apart.
  """

  @property
  def _is_v2_column(self):
    # V2 status is inherited from the wrapped categorical column.
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column)  # pylint: disable=protected-access

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.categorical_column.name

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return self.categorical_column.parse_example_spec

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    # Deprecated V1 counterpart of `parse_example_spec`.
    return self.categorical_column._parse_example_spec  # pylint: disable=protected-access

  def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class."""
    return self.categorical_column.transform_feature(transformation_cache,
                                                     state_manager)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    # Deprecated V1 counterpart of `transform_feature`.
    return self.categorical_column._transform_feature(inputs)  # pylint: disable=protected-access

  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.categorical_column.num_buckets

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    # Deprecated V1 counterpart of `num_buckets`.
    return self.categorical_column._num_buckets  # pylint: disable=protected-access

  def _get_sparse_tensors_helper(self, sparse_tensors):
    """Reshapes the id/weight `SparseTensor`s to rank 3.

    Trailing dimensions beyond the second are collapsed into a single third
    dimension so each sequence step keeps its own ids during embedding
    lookup.
    """
    id_tensor = sparse_tensors.id_tensor
    weight_tensor = sparse_tensors.weight_tensor
    # Expands third dimension, if necessary so that embeddings are not
    # combined during embedding lookup. If the tensor is already 3D, leave
    # as-is.
    shape = array_ops.shape(id_tensor)
    # Compute the third dimension explicitly instead of setting it to -1, as
    # that doesn't work for dynamically shaped tensors with 0-length at runtime.
    # This happens for empty sequences.
    target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]
    id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)
    if weight_tensor is not None:
      weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)
    return CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """Returns an IdWeightPair.

    `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
    weights.

    `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
    `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
    output of a `VarLenFeature` which is a ragged matrix.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    """
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    return self._get_sparse_tensors_helper(sparse_tensors)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    # weight_collections/trainable are unused; kept for V1 signature parity.
    sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    return self._get_sparse_tensors_helper(sparse_tensors)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column]

  def get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    return cls(**kwargs)
def _check_config_keys(config, expected_keys):
"""Checks that a config has all expected_keys."""
if set(config.keys()) != set(expected_keys):
raise ValueError('Invalid config: {}, expected keys: {}'.format(
config, expected_keys))
def _standardize_and_copy_config(config):
"""Returns a shallow copy of config with lists turned to tuples.
Keras serialization uses nest to listify everything.
This causes problems with the NumericColumn shape, which becomes
unhashable. We could try to solve this on the Keras side, but that
would require lots of tracking to avoid changing existing behavior.
Instead, we ensure here that we revive correctly.
Args:
config: dict that will be used to revive a Feature Column
Returns:
Shallow copy of config with lists turned to tuples.
"""
kwargs = config.copy()
for k, v in kwargs.items():
if isinstance(v, list):
kwargs[k] = tuple(v)
return kwargs
def _sanitize_column_name_for_variable_scope(name):
"""Sanitizes user-provided feature names for use as variable scopes."""
invalid_char = re.compile('[^A-Za-z0-9_.\\-]')
return invalid_char.sub('_', name)
``` |
[
{
"content": "Here is some code:\n```python\n# -*- coding: utf-8 -*-\nfrom django.utils import timezone\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.db import models, transaction\nfrom django.db.models import F\nfrom... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom django.utils import timezone\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.db import models, transaction\nfrom django.db.model... | ```python
# -*- coding: utf-8 -*-
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models, transaction
from django.db.models import F
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from .managers import CounterManager
__all__ = ['Counter', ]
@python_2_unicode_compatible
class Counter(models.Model):
    """ Hits counter per date

    One row per (content object, date) pair; `hits` is the number of hits
    recorded for that object on that date.
    """
    # Content-object field: generic FK so any model instance can be counted.
    # NOTE(review): no on_delete argument — this targets Django < 2.0 where
    # it defaulted to CASCADE; confirm before upgrading Django.
    content_type = models.ForeignKey(ContentType,
                                     verbose_name=_('content type'),
                                     related_name="content_type_set_for_%(class)s")
    object_pk = models.TextField(_('object ID'))
    content_object = GenericForeignKey(ct_field="content_type", fk_field="object_pk")

    # Day the hits belong to; callable default is evaluated per row creation.
    date = models.DateField(default=timezone.now, verbose_name=_('date'))
    hits = models.PositiveIntegerField(default=0, verbose_name=_('hits count'))

    # Manager
    objects = CounterManager()

    class Meta:
        verbose_name = _('counter')
        verbose_name_plural = _('counters')
        # Exactly one counter row per object per day.
        unique_together = (('content_type', 'object_pk', 'date'), )

    def __str__(self):
        return '{date}: {hits}'.format(
            date=self.date.strftime('%d-%m-%Y'),
            hits=self.hits
        )

    @classmethod
    @transaction.atomic()
    def hit(cls, obj, amount=1, date=None):
        """ Increase hits counter for particular object on date (now() by default)

        :param obj: model object
        :param amount: increase amount (1 by default)
        :param date: date of the hit; defaults to timezone.now()
        :return: None
        """
        ct = ContentType.objects.get_for_model(obj)
        date = date or timezone.now()
        # get_or_create guarantees the row exists; the F() expression below
        # performs the increment in SQL, avoiding a read-modify-write race.
        # (The local `_` rebinding shadows the ugettext alias only inside
        # this method, which is harmless.)
        obj, _ = cls.objects.get_or_create(content_type=ct, object_pk=obj._get_pk_val(), date=date,
                                           defaults={'hits': 0})
        cls.objects.filter(pk=obj.pk).update(hits=F('hits')+amount)
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# coding=utf-8\n#\n# Copyright © 2015 VMware, Inc. All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# coding=utf-8\n#\n# Copyright © 2015 VMware, Inc. All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\")... | ```python
# coding=utf-8
#
# Copyright © 2015 VMware, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
__author__ = 'yfauser'
from tests.config import *
from nsxramlclient.client import NsxClient
import time
client_session = NsxClient(nsxraml_file, nsxmanager, nsx_username, nsx_password, debug=True)
def test_segment_pools():
    """Exercise segment ID pool CRUD against a live NSX Manager.

    Side effects: creates, updates, reads and deletes a segment pool via the
    module-level `client_session`. This is a manual integration script, not
    an isolated unit test.
    """
    ### Test Segment ID Pool Operations
    # Get all configured Segment Pools
    get_segment_resp = client_session.read('vdnSegmentPools')
    client_session.view_response(get_segment_resp)

    # Add a Segment Pool
    segments_create_body = client_session.extract_resource_body_example('vdnSegmentPools', 'create')
    client_session.view_body_dict(segments_create_body)
    segments_create_body['segmentRange']['begin'] = '11002'
    segments_create_body['segmentRange']['end'] = '11003'
    segments_create_body['segmentRange']['name'] = 'legacy'
    create_response = client_session.create('vdnSegmentPools', request_body_dict=segments_create_body)
    client_session.view_response(create_response)
    # Sleeps give the NSX Manager time to apply each change before the next
    # call uses the new object.
    time.sleep(5)

    # Update the new Segment Pool:
    update_segment_body = client_session.extract_resource_body_example('vdnSegmentPool', 'update')
    update_segment_body['segmentRange']['name'] = 'PythonTest'
    update_segment_body['segmentRange']['end'] = '11005'
    client_session.update('vdnSegmentPool', uri_parameters={'segmentPoolId': create_response['objectId']},
                          request_body_dict=update_segment_body)
    time.sleep(5)

    # Display a specific Segment pool (the new one)
    specific_segement_resp = client_session.read('vdnSegmentPool', uri_parameters={'segmentPoolId':
                                                 create_response['objectId']})
    client_session.view_response(specific_segement_resp)
    time.sleep(5)

    # Delete new Segment Pool
    client_session.delete('vdnSegmentPool', uri_parameters={'segmentPoolId': create_response['objectId']})
def test_mcast_pools():
    """Exercise multicast address pool CRUD against a live NSX Manager.

    Side effects: creates, updates, reads and deletes a multicast pool via
    the module-level `client_session`. Manual integration script.
    """
    ### Test Multicast Pool Operations
    # Add a multicast Pool
    mcastpool_create_body = client_session.extract_resource_body_example('vdnMulticastPools', 'create')
    client_session.view_body_dict(mcastpool_create_body)
    mcastpool_create_body['multicastRange']['desc'] = 'Test'
    mcastpool_create_body['multicastRange']['begin'] = '235.0.0.0'
    mcastpool_create_body['multicastRange']['end'] = '235.1.1.1'
    mcastpool_create_body['multicastRange']['name'] = 'legacy'
    create_response = client_session.create('vdnMulticastPools', request_body_dict=mcastpool_create_body)
    client_session.view_response(create_response)

    # Get all configured Multicast Pools
    get_mcast_pools = client_session.read('vdnMulticastPools')
    client_session.view_response(get_mcast_pools)
    time.sleep(5)

    # Update the newly created mcast pool
    # NOTE(review): 'multicastAddresssRangeId' (triple 's') looks misspelled
    # but must match the key defined in the RAML file — confirm against the
    # RAML resource definition before "fixing" it.
    mcastpool_update_body = client_session.extract_resource_body_example('vdnMulticastPool', 'update')
    mcastpool_update_body['multicastRange']['end'] = '235.3.1.1'
    mcastpool_update_body['multicastRange']['name'] = 'Python'
    update_response = client_session.update('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId':
                                            create_response['objectId']},
                                            request_body_dict=mcastpool_update_body)
    client_session.view_response(update_response)

    # display a specific Multicast Pool
    get_mcast_pool = client_session.read('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId':
                                         create_response['objectId']})
    client_session.view_response(get_mcast_pool)

    # Delete new mcast pool
    client_session.delete('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId': create_response['objectId']})
#test_segment_pools()
#test_mcast_pools()
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n\"\"\"KF experiments development.\"\"\"\nfrom cgmtools import utils\nfrom cgmtools import plotting\nfrom cgmtools.forecast import kf\nimport datetime\nimport numpy as np\nimport pickle as pkl\n\n###########################... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n\"\"\"KF experiments development.\"\"\"\nfrom cgmtools import utils\nfrom cgmtools import plotting\nfrom cgmtools.forecast import kf\nimport datetime\nimport numpy as np\nimport pickle as pkl\n\n###########... | ```python
"""KF experiments development."""
from cgmtools import utils
from cgmtools import plotting
from cgmtools.forecast import kf
import datetime
import numpy as np
import pickle as pkl
###############################################################################
# Load full data set from pickle file (see data_wrangler.py)
dfs_full = pkl.load(open('../../data/dfs_py3.pkl', 'rb'))
# Keep only patients with more than `THRESHOLD` days of CGM acquisition
_threshold = datetime.timedelta(days=3.5) # default
dfs = utils.filter_patients(dfs_full, _threshold)
burn_in = 300 # burn-in samples used to learn the best order via cv
n_splits = 15
ph = 18 # prediction horizon
# State-space model:
# transition matrix (double integration model)
F = np.array([[2, -1], [1, 0]])
# measures matrix
H = np.array([1, 0])
# Get patients list
patients = list(dfs.keys())
for idx in patients:
df = utils.gluco_extract(dfs[idx], return_df=True)
# Learn the best order via cv
# lambda2_range = np.logspace(-12, -4, 10)
lambda2_range = np.logspace(-12, -4, 3)
sigma2_range = np.linspace(1, 40, 3)
# sigma2_range = np.linspace(1, 40, 10)
out = kf.grid_search(df, lambda2_range, sigma2_range, burn_in=burn_in,
n_splits=15, F=F, H=H,
return_mean_vld_error=True,
return_initial_state_mean=True,
return_initial_state_covariance=True,
verbose=False)
lambda2, sigma2, mse, X0, P0 = out
print("[{}]:\tBest lambda {:2.12f}, sigma {:2.0f}".format(idx, lambda2,
sigma2))
Q = np.array([[lambda2, 0], [0, 0]]) # transition_covariance
R = sigma2 # observation (co)variance
df = df.iloc[burn_in:] # don't mix-up training/test
_kf = kf.cgmkalmanfilter(F=F, Q=Q, R=R, X0=X0, P0=P0)
errs, forecast = kf.online_forecast(df, _kf, H, ph=18, lambda2=lambda2,
sigma2=sigma2, verbose=True)
# Save results reports
error_summary = utils.forecast_report(errs)
print(error_summary)
# import matplotlib.pyplot as plt
# plotting.cgm(df, forecast['ts'], title='Patient '+idx,
# savefig=False)
# plotting.residuals(df, forecast['ts'], skip_first=burn_in,
# skip_last=ph, title='Patient '+idx,
# savefig=False)
# plt.show()
# break
# # dump it into a pkl
pkl.dump(error_summary, open(idx+'.pkl', 'wb'))
try:
# Plot signal and its fit
plotting.cgm(df, forecast['ts'], title='Patient '+idx,
savefig=True)
# Plot residuals
plotting.residuals(df, forecast['ts'], skip_first=burn_in,
skip_last=ph, title='Patient '+idx,
savefig=True)
except:
print("Plotting failed for patient {}".format(idx))
``` |
[
{
"content": "```python\n\"\"\" Combine several other nodes together in parallel\n\nThis is useful to be combined with the\n:class:`~pySPACE.missions.nodes.meta.flow_node.FlowNode`.\n\"\"\"\n\nimport numpy\nfrom pySPACE.environments.chains.node_chain import NodeChainFactory\n\nfrom pySPACE.missions.nodes.base_n... | [
{
"content": "<|memory_start|>```python\n\"\"\" Combine several other nodes together in parallel\n\nThis is useful to be combined with the\n:class:`~pySPACE.missions.nodes.meta.flow_node.FlowNode`.\n\"\"\"\n\nimport numpy\nfrom pySPACE.environments.chains.node_chain import NodeChainFactory\n\nfrom pySPACE.missi... | ```python
""" Combine several other nodes together in parallel
This is useful to be combined with the
:class:`~pySPACE.missions.nodes.meta.flow_node.FlowNode`.
"""
import numpy
from pySPACE.environments.chains.node_chain import NodeChainFactory
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.feature_vector import FeatureVector
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.prediction_vector import PredictionVector
# ensemble imports
import os
import fcntl
import fnmatch
import cPickle
import logging
from collections import defaultdict
from pySPACE.missions.nodes.meta.flow_node import FlowNode
from pySPACE.tools.filesystem import locate
class SameInputLayerNode(BaseNode):
    """ Encapsulates a set of other nodes that are executed in parallel in the flow.

    This node was a thin wrapper around MDP's SameInputLayer node
    but is now an own implementation. All sub-nodes receive the same input;
    their outputs (FeatureVector, PredictionVector or TimeSeries — all
    sub-nodes must agree on the type) are concatenated into one result.

    **Parameters**

    :enforce_unique_names:
        When combining time series channels or feature vectors,
        the node adds the index of the current node to the channel names or
        feature names as a prefix to enforce unique names.

        (*optional, default: True*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Same_Input_Layer
            parameters :
                 enforce_unique_names : True
                 nodes :
                    -
                        node : Time_Domain_Features
                        parameters :
                            moving_window_length : 1
                    -
                        node : STFT_Features
                        parameters :
                            frequency_band : [2.0, 8.0]
                            frequency_resolution : 1.0
    """
    def __init__(self, nodes, enforce_unique_names=True,
                 store=False, **kwargs):
        # Needed to find out dimensions and trainability before the permanent
        # attributes are set up.
        # NOTE(review): `store` is accepted but not forwarded to BaseNode —
        # confirm whether that is intended.
        self.nodes = nodes
        super(SameInputLayerNode, self).__init__(**kwargs)
        # Sub-nodes manage their own persistence; keep them out of
        # permanent_state so they are not pickled twice.
        self.permanent_state.pop("nodes")
        self.set_permanent_attributes(output_type=None,
                                      names=None,
                                      unique=enforce_unique_names)

    @staticmethod
    def node_from_yaml(layer_spec):
        """ Load the specs and initialize the layer nodes """
        # This node requires one parameter, namely a list of nodes
        assert("parameters" in layer_spec
               and "nodes" in layer_spec["parameters"]),\
            "SameInputLayerNode requires specification of a list of nodes!"
        # Create all nodes that are packed together in this layer
        layer_nodes = []
        for node_spec in layer_spec["parameters"]["nodes"]:
            node_obj = BaseNode.node_from_yaml(node_spec)
            layer_nodes.append(node_obj)
        layer_spec["parameters"].pop("nodes")
        # Create the node object
        node_obj = SameInputLayerNode(nodes=layer_nodes,
                                      **layer_spec["parameters"])
        return node_obj

    def reset(self):
        """ Also reset internal nodes """
        # BaseNode.reset() restores permanent state; preserve the sub-node
        # list across that call since it was removed from permanent_state.
        nodes = self.nodes
        for node in nodes:
            node.reset()
        super(SameInputLayerNode, self).reset()
        self.nodes = nodes

    def register_input_node(self, input_node):
        """ All sub-nodes have the same input node """
        super(SameInputLayerNode, self).register_input_node(input_node)
        # Register the node as the input for all internal nodes
        for node in self.nodes:
            node.register_input_node(input_node)

    def _execute(self, data):
        """ Process the data through the internal nodes and merge the results """
        names = []
        result_array = None
        result_label = []
        result_predictor = []
        result_prediction = []
        # For all node-layers
        for node_index, node in enumerate(self.nodes):
            # Compute node's result
            node_result = node.execute(data)
            # Determine the output type of the node
            if self.output_type is None:
                self.output_type = type(node_result)
            else:
                assert (self.output_type == type(node_result)), \
                    "SameInputLayerNode requires that all of its layers return "\
                    "the same type. Types found: %s %s" \
                    % (self.output_type, type(node_result))
            # Merge the nodes' outputs depending on the type
            if self.output_type == FeatureVector:
                result_array = \
                    self.add_feature_vector(node_result, node_index,
                                            result_array, names)
            elif self.output_type == PredictionVector:
                if type(node_result.label) == list:
                    result_label.extend(node_result.label)
                else:
                    # a single classification is expected here
                    result_label.append(node_result.label)
                if type(node_result.prediction) == list:
                    result_prediction.extend(node_result.prediction)
                else:
                    result_prediction.append(node_result.prediction)
                if type(node_result.predictor) == list:
                    result_predictor.extend(node_result.predictor)
                else:
                    result_predictor.append(node_result.predictor)
            else:
                assert (self.output_type == TimeSeries), \
                    "SameInputLayerNode can not merge data of type %s." \
                    % self.output_type
                if self.names is None and not self.unique:
                    names.extend(node_result.channel_names)
                elif self.names is None and self.unique:
                    for name in node_result.channel_names:
                        names.append("%i_%s" % (node_index, name))
                # Fixed: was `result_array == None` / `self.dtype == None`.
                # `==` on numpy-based TimeSeries data is elementwise and does
                # not test for None; `is None` is the correct identity check.
                if result_array is None:
                    result_array = node_result
                    if self.dtype is None:
                        self.dtype = node_result.dtype
                else:
                    result_array = numpy.concatenate((result_array,
                                                      node_result), axis=1)
        # Construct output with correct type and names
        if self.names is None:
            self.names = names
        if self.output_type == FeatureVector:
            return FeatureVector(result_array, self.names)
        elif self.output_type == PredictionVector:
            return PredictionVector(label=result_label,
                                    prediction=result_prediction,
                                    predictor=result_predictor)
        else:
            return TimeSeries(result_array, self.names,
                              node_result.sampling_frequency,
                              node_result.start_time, node_result.end_time,
                              node_result.name, node_result.marker_name)

    def add_feature_vector(self, data, index, result_array, names):
        """ Concatenate feature vectors, ensuring unique names """
        if self.names is None and self.unique:
            for name in data.feature_names:
                names.append("%i_%s" % (index, name))
        elif self.names is None and not self.unique:
            names.extend(data.feature_names)
        # Fixed: `is None` instead of `== None` (elementwise on arrays).
        if result_array is None:
            result_array = data
        else:
            result_array = numpy.concatenate((result_array, data), axis=1)
        return result_array

    def is_trainable(self):
        """ Trainable if one subnode is trainable """
        return any(node.is_trainable() for node in self.nodes)

    def is_supervised(self):
        """ Supervised if one subnode requires supervised training """
        return any(node.is_supervised() for node in self.nodes)

    def _train(self, x, *args, **kwargs):
        """ Perform single training step by training the internal nodes """
        for node in self.nodes:
            if node.is_training():
                node.train(x, *args, **kwargs)

    def _stop_training(self):
        """ Finish training of all internal nodes that are still training """
        for node in self.nodes:
            if node.is_training():
                node.stop_training()

    def store_state(self, result_dir, index=None):
        """ Stores all nodes in subdirectories of *result_dir* """
        for i, node in enumerate(self.nodes):
            node_dir = os.path.join(
                result_dir,
                (self.__class__.__name__ + str(index).split("None")[0] + str(i)))
            node.store_state(node_dir, index=i)

    def _inc_train(self, data, label):
        """ Forward data to retrainable nodes

        So the single nodes do not need to buffer or *present_labels* does not
        have to be reimplemented.
        """
        for node in self.nodes:
            if node.is_retrainable():
                node._inc_train(data, label)

    def set_run_number(self, run_number):
        """ Informs all subnodes about the number of the current run """
        for node in self.nodes:
            node.set_run_number(run_number)
        super(SameInputLayerNode, self).set_run_number(run_number)
class EnsembleNotFoundException(Exception): pass
class ClassificationFlowsLoaderNode(BaseNode):
    """ Combine an ensemble of pretrained node chains

    This node loads all "pickled" flows whose file names match
    *ensemble_pattern* and are contained in the directory tree rooted at
    *ensemble_base_dir*. If the *flow_select_list* is not empty, only the
    flows with indices contained in flow_select_list are used. The index "-1"
    corresponds to "all flows".

    **Parameters**

    :ensemble_base_dir:
        The root directory under which the stored flow objects which constitute
        the ensemble are stored.

    :ensemble_pattern:
        Pickled flows must match the given pattern to be included into the
        ensemble.

    :flow_select_list:
        This optional parameter allows to select only a subset of the flows
        that are found in ensemble_base_dir. It must be a list of indices.
        Only the flows with the given index are included into the ensemble.
        If -1 is contained in the list, all flows are automatically added to
        the ensemble.

        .. note::
              The order of the flows in the ensemble is potentially random or at
              least hard to predict. Thus, this parameter should not be used
              to select a specific flow. In contrast, this parameter can be used
              to select a certain number of flows from the available flows
              (where it doesn't matter which ones). This can be useful for
              instance in benchmarking experiments when one is interested in
              the average performance of an ensemble of a certain size.

        (*optional, default: [-1]*)

    :cache_dir:
        If this argument is given, all results of all ensembles are remembered
        and stored in a persistent cache file in the given cache_dir. These
        cached results can be later reused without actually loading and
        executing the ensemble.

        (*optional, default: None*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Ensemble_Node
            parameters :
                 ensemble_base_dir : "/tmp/" # <- insert suitable directory here
                 ensemble_pattern : "flow*.pickle"
                 flow_select_list : "eval(range(10))"

    :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
    :Created: 2010/05/20
    """

    def __init__(self, ensemble_base_dir, ensemble_pattern,
                 flow_select_list=(-1,), cache_dir=None, **kwargs):
        # NOTE: the default of *flow_select_list* is an immutable tuple
        # instead of the documented list [-1] to avoid the shared
        # mutable-default-argument pitfall; only membership of -1 is
        # tested below, so behavior is unchanged for all callers.
        super(ClassificationFlowsLoaderNode, self).__init__(**kwargs)
        # Load all flow-pickle files that match the given ensemble_pattern
        # in the directory tree rooted in ensemble_base_dir
        flow_pathes = tuple(locate(ensemble_pattern, ensemble_base_dir))
        if -1 not in flow_select_list:
            # Select only flows for ensemble whose index is contained in
            # flow_select_list
            flow_pathes = tuple(flow_pathes[index]
                                for index in flow_select_list)
        if len(flow_pathes) == 0:
            raise EnsembleNotFoundException(
                "No ensemble found in %s for pattern %s"
                % (ensemble_base_dir, ensemble_pattern))
        # One short name per flow: the first two '_'-separated tokens of
        # the flow's file name (list comprehension instead of map+lambda)
        self.feature_names = \
            ["_".join(path.split(os.sep)[-1].split('_')[0:2])
             for path in flow_pathes]
        self.set_permanent_attributes(ensemble=None,
                                      flow_pathes=flow_pathes,
                                      cache_dir=cache_dir,
                                      cache=None,
                                      cache_updated=False,
                                      store=True)  # always store cache

    def _cache_file_path(self, flow_path):
        """ Return the path of the persistent cache file for *flow_path*

        NOTE(review): the file name is based on hash(flow_path); with
        Python 3's string-hash randomization this would differ between
        interpreter runs — fine for the original Python 2 target.
        """
        return (self.cache_dir + os.sep + "ensemble_cache" + os.sep
                + "cache_%s" % hash(flow_path))

    def _load_cache(self):
        """ Load the persisted per-flow result caches (if any) from disk

        Access to each cache file is guarded by an exclusive flock on an
        accompanying ".lock" file so concurrent processes do not
        interleave reads and writes.
        """
        self.cache = defaultdict(dict)
        # Check if there are cached results for this ensemble
        for flow_path in self.flow_pathes:
            file_path = self._cache_file_path(flow_path)
            if not os.path.exists(file_path):
                continue
            # Load ensemble cache
            self._log("Loading flow cache from %s" % file_path)
            lock_file = open(file_path + ".lock", 'w')
            fcntl.flock(lock_file, fcntl.LOCK_EX)
            self._log("Got exclusive lock on %s" % (file_path + ".lock"),
                      logging.INFO)
            try:
                with open(file_path, 'r') as cache_file:
                    self.cache[flow_path] = cPickle.load(cache_file)
            finally:
                self._log("Release exclusive lock on %s"
                          % (file_path + ".lock"), logging.INFO)
                fcntl.flock(lock_file, fcntl.LOCK_UN)
                # close the lock handle (the original code leaked it)
                lock_file.close()

    def _load_ensemble(self):
        """ Instantiate the ensemble of flow nodes from the pickled flows """
        self._log("Loading ensemble")
        # Create a flow node for each flow pickle
        flow_nodes = [FlowNode(subflow_path=flow_path)
                      for flow_path in self.flow_pathes]
        # Create a SameInputLayer node that executes all flows independently
        # with the same input
        ensemble = SameInputLayerNode(flow_nodes, enforce_unique_names=True)
        # We can now set the input dim and output dim
        self.input_dim = ensemble.input_dim
        self.output_dim = ensemble.output_dim
        self.set_permanent_attributes(ensemble=ensemble)

    def _train(self, data, label):
        """ Trains the ensemble on the given data vector *data* """
        # 'is None' instead of '== None' (identity check for None)
        if self.ensemble is None:
            # Load the ensemble lazily on first use
            self._load_ensemble()
        return self.ensemble.train(data, label)

    def _execute(self, data):
        """ Execute all flows on *data*, reusing cached results if possible

        Returns a PredictionVector combining the label and prediction of
        every flow; its dim_names are this node's feature_names.
        """
        # Compute data's hash, used as the cache key
        data_hash = hash(tuple(data.flatten()))
        # Load ensemble's cache
        if self.cache is None:
            if self.cache_dir:
                self._load_cache()
            else:  # Caching disabled
                self.cache = defaultdict(dict)
        # Try to look up the result of each flow for the given data in the
        # cache; only on a cache miss is the flow actually executed
        labels = []
        predictions = []
        for i, flow_path in enumerate(self.flow_pathes):
            if data_hash in self.cache[flow_path]:
                label, prediction = self.cache[flow_path][data_hash]
            else:
                self.cache_updated = True
                if self.ensemble is None:
                    # Load ensemble since data is not cached
                    self._load_ensemble()
                node_result = self.ensemble.nodes[i].execute(data)
                label = node_result.label
                prediction = node_result.prediction
                self.cache[flow_path][data_hash] = (label, prediction)
            labels.append(label)
            predictions.append(prediction)
        result = PredictionVector(label=labels,
                                  prediction=predictions,
                                  predictor=self)
        result.dim_names = self.feature_names
        return result

    def store_state(self, result_dir, index=None):
        """ Persist the result cache of this node (if caching is enabled)

        Each flow's cache is written to an own file below
        ``<cache_dir>/ensemble_cache``. An existing cache file (possibly
        extended by another process in the meantime) is merged into the
        in-memory cache before being rewritten; all file access happens
        under an exclusive flock on a ".lock" file.
        """
        # Store cache only if caching is enabled and the cache has changed
        if not (self.cache_dir and self.cache_updated):
            return
        cache_root = self.cache_dir + os.sep + "ensemble_cache"
        if not os.path.exists(cache_root):
            os.makedirs(cache_root)
        for flow_path in self.flow_pathes:
            file_path = self._cache_file_path(flow_path)
            update_existing = os.path.exists(file_path)
            if update_existing:
                self._log("Updating flow cache %s" % file_path)
            else:
                self._log("Writing flow cache %s" % file_path)
            lock_file = open(file_path + ".lock", 'w')
            fcntl.flock(lock_file, fcntl.LOCK_EX)
            self._log("Got exclusive lock on %s" % (file_path + ".lock"),
                      logging.INFO)
            try:
                if update_existing:
                    # Merge entries another process may have written since
                    # our cache was loaded
                    with open(file_path, 'r') as cache_file:
                        self.cache[flow_path].update(
                            cPickle.load(cache_file))
                with open(file_path, 'w') as cache_file:
                    cPickle.dump(self.cache[flow_path], cache_file)
            finally:
                self._log("Release exclusive lock on %s"
                          % (file_path + ".lock"), logging.INFO)
                fcntl.flock(lock_file, fcntl.LOCK_UN)
                # close the lock handle (the original code leaked it)
                lock_file.close()
class MultiClassLayerNode(SameInputLayerNode):
    """ Wrap the one vs. rest or one vs. one scheme around the given node

    The given class labels are forwarded to the internal nodes.
    During training, data is relabeled.
    Everything else is the same as in the base node.

    Though this scheme is most important for classification it permits
    other trainable algorithms to use this scheme.

    **Parameters**

    :class_labels:
        This is the complete list of expected class labels.
        It is needed to construct the necessary flows in the
        initialization stage.

    :node:
        Specification of the wrapped node for the used scheme
        As class labels, for the *1vsR* scheme,
        this node has to use *REST* and *LABEL*.
        *LABEL* is replaced with the different `class_labels`.
        The other label should be *REST*.
        For the *1vs1* scheme *LABEL1* and *LABEL2* have to be used.

    :scheme:
        One of *1v1* (One vs. One) or *1vR* (One vs. Rest)

        .. note:: The one class approach is included by simply not giving
                  'REST' label to the classifier, but filtering it out.

        (*optional, default:'1v1'*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : MultiClassLayer
            parameters :
                class_labels : ["Target", "Standard","Artifact"]
                scheme : 1vR
                node :
                    -
                        node : 1SVM
                        parameters :
                            class_labels : ["LABEL","REST"]
                            complexity : 1
    """
    @staticmethod
    def node_from_yaml(layer_spec):
        """ Load the specs and initialize the layer nodes """
        assert("parameters" in layer_spec
               and "class_labels" in layer_spec["parameters"]
               and "node" in layer_spec["parameters"]),\
            "Node requires specification of a node and classification labels!"
        parameters = layer_spec["parameters"]
        scheme = parameters.pop("scheme", "1vs1")
        node_spec = parameters["node"][0]
        class_labels = parameters["class_labels"]
        # Build one label-replacement mapping per internal node
        if scheme == '1vR':
            # one node per class: LABEL vs. REST
            replacements = [{"LABEL": label} for label in class_labels]
        else:
            # one node per unordered pair of classes: LABEL1 vs. LABEL2
            replacements = []
            num_classes = len(class_labels)
            for first in range(num_classes - 1):
                for second in range(first + 1, num_classes):
                    replacements.append({"LABEL1": class_labels[first],
                                         "LABEL2": class_labels[second]})
        # Instantiate one wrapped node per replacement mapping
        layer_nodes = [
            BaseNode.node_from_yaml(
                NodeChainFactory.instantiate(node_spec, mapping))
            for mapping in replacements]
        parameters.pop("node")
        parameters.pop("class_labels")
        # Create the layer object holding all wrapped nodes
        return MultiClassLayerNode(nodes=layer_nodes, **parameters)
# Mapping from the short node names usable in YAML specifications to the
# classes in this module that implement them.
_NODE_MAPPING = {"Ensemble_Node": ClassificationFlowsLoaderNode,
                 "Same_Input_Layer": SameInputLayerNode,
                 }
``` |
[
{
"content": "Here is some code:\n```python\nfrom model.group import Group\n\nclass GroupHelper:\n\n def __init__(self, app):\n self.app = app\n\n def open_group_page(self):\n wd = self.app.wd\n if not (wd.current_url.endswith(\"/group.php\") and len(wd.find_elements_by_name(\"new\"))... | [
{
"content": "Here is some code:\n<|memory_start|>```python\nfrom model.group import Group\n\nclass GroupHelper:\n\n def __init__(self, app):\n self.app = app\n\n def open_group_page(self):\n wd = self.app.wd\n if not (wd.current_url.endswith(\"/group.php\") and len(wd.find_elements_b... | ```python
from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def open_group_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def return_to_group_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
'''
Форма создания новой группы
'''
def create(self, group):
wd = self.app.wd
self.open_group_page()
# init group greation
wd.find_element_by_name("new").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_group_page()
self.group_cache = None
'''
Удаление первой группы
'''
def delete_first_group(self):
self.delete_group_by_index(0)
'''
Удаление рандомной группы
'''
def delete_group_by_index(self, index):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
#submit deletion
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
'''
Модификация группы
'''
def change_group(self, group):
wd = self.app.wd
self.open_group_page()
wd.find_element_by_name("edit").click()
self.fill_group_form(group)
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
'''
Модификация первой группы
'''
def modify_first_group(self):
self.modify_group_by_index(0)
'''
Модификация рандомной группы
'''
def modify_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
# modification
wd.find_element_by_name("edit").click()
# fill
self.fill_group_form(new_group_data)
# submit
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
'''
fill group form
'''
def fill_group_form(self, group):
wd = self.app.wd
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
'''
отдельный метод выбора первой группы
'''
def select_first_group(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
'''
метод выбора рандомной группы
'''
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
'''
проверка на существование групп
'''
def count(self):
wd = self.app.wd
self.open_group_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
'''
Получение списка групп
'''
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_group_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name = text.strip(), id = id))
return list(self.group_cache)
def delete_group_by_id(self, id):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def modify_group_by_id(self, id, new_group_data):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
# modification
wd.find_element_by_name("edit").click()
# fill
self.fill_group_form(new_group_data)
# submit
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
``` |
[
{
"content": "Return the code unaltered:\n```python\nfrom manifestgen.app import convert_filenames_to_urls\n\nimport unittest\n\nclass TestConvertFilenamesToUrls(unittest.TestCase):\n \n def test_cannonical(self):\n result = convert_filenames_to_urls( [ '/usr/proj/index.html', '/usr/proj/js/util.js... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\nfrom manifestgen.app import convert_filenames_to_urls\n\nimport unittest\n\nclass TestConvertFilenamesToUrls(unittest.TestCase):\n \n def test_cannonical(self):\n result = convert_filenames_to_urls( [ '/usr/proj/index.html', '/usr... | ```python
from manifestgen.app import convert_filenames_to_urls
import unittest
class TestConvertFilenamesToUrls(unittest.TestCase):
def test_cannonical(self):
result = convert_filenames_to_urls( [ '/usr/proj/index.html', '/usr/proj/js/util.js'], '/usr/proj', '/sample/static' )
self.assertEqual( result, [ '/sample/static/index.html', '/sample/static/js/util.js' ])
def test_no_url_prefix(self):
result = convert_filenames_to_urls( [ '/usr/proj/index.html', '/usr/proj/js/util.js'], '/usr/proj')
self.assertEqual( result, [ '/index.html', '/js/util.js' ])
def test_no_docroot(self):
result = convert_filenames_to_urls( [ '/usr/proj/index.html', '/usr/proj/js/util.js'], url_prefix='/sample/static')
self.assertEqual( result, [ '/sample/static/usr/proj/index.html', '/sample/static/usr/proj/js/util.js' ])
def test_no_docroot_no_url_prefix(self):
result = convert_filenames_to_urls( [ '/usr/proj/index.html', '/usr/proj/js/util.js'])
self.assertEqual( result, [ '/usr/proj/index.html', '/usr/proj/js/util.js' ])
``` |
[
{
"content": "Here is some code:\n```python\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# ---------------------------------------------------\r\n# Copyright (c) 2013 Pablo Caro. All Rights Reserved.\r\n# Pablo Caro <me@pcaro.es> - http://pcaro.es/\r\n# ProgressBar.py\r\n# --------------------------... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# ---------------------------------------------------\r\n# Copyright (c) 2013 Pablo Caro. All Rights Reserved.\r\n# Pablo Caro <me@pcaro.es> - http://pcaro.es/\r\n# ProgressBar.py\r\n# ----------... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------
# Copyright (c) 2013 Pablo Caro. All Rights Reserved.
# Pablo Caro <me@pcaro.es> - http://pcaro.es/
# ProgressBar.py
# ---------------------------------------------------
import sys
class ProgressBar:
def __init__(self, min=0, max=100, width=60, charset='[=]'):
self.min = min
self.max = max
self.width = width
self.current = min
self.percent = 0.0
self.int_percent = 0
if len(charset) != 3:
charset = '[=]'
self.charset = charset
self.bar = ''
self.used = -1
self.int_percent_change = False
def update(self, current):
self.current = current
self.percent = (float(self.current-self.min)/(self.max-self.min))*100.0
int_percent = int(self.percent)
if int_percent != self.int_percent:
self.int_percent_change = True
self.int_percent = int_percent
self.__generate_bar__()
if self.int_percent_change:
self.int_percent_change = False
return True
else:
return False
def show(self):
sys.stdout.write(str(self))
sys.stdout.flush()
def __str__(self):
return self.bar
def __generate_bar__(self):
self.used = int((float(self.current-self.min)/(self.max-self.min)) *
(self.width-6))
center = self.charset[1] * self.used
self.bar = (self.charset[0] + center + self.charset[2]
+ " " + str(self.int_percent) + '%' + '\r')
def main():
pass
if __name__ == '__main__':
main()
``` |
[
{
"content": "Here is the script:\n```python\n#! /usr/bin/env python\n\n# Goal: Merge and run coverage analysis on the two Samples generated.\n# Output: A mered bam file, and coverage analysis on the merged bam file.\n\nfrom optparse import OptionParser\nimport os\nimport os.path\nimport sys\nimport re\nimpor... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n#! /usr/bin/env python\n\n# Goal: Merge and run coverage analysis on the two Samples generated.\n# Output: A mered bam file, and coverage analysis on the merged bam file.\n\nfrom optparse import OptionParser\nimport os\nimport os.path\nimport sys\n... | ```python
#! /usr/bin/env python
# Goal: Merge and run coverage analysis on the two Samples generated.
# Output: A mered bam file, and coverage analysis on the merged bam file.
from optparse import OptionParser
import os
import os.path
import sys
import re
import datetime
import json
from QC_Run import QC_Run
from tools import *
class Merger:
# @param bams_to_merge a list of the bam files to merge together
# @param merged_dir the directory in which to place the merged bam file
# @param sample_name the name of the sample. Used for the SM tag
# @param cleanup Flag to delete the temporary files or not. Default: false
def __init__(self, sample_json=None):
if sample_json:
self.sample_json = json.load(open(sample_json))
self.merge_dir = ''
self.bams_to_merge = []
self.runs_to_merge = []
self.QC_Run = QC_Run(self.sample_json)
# merge the following runs
def merge(self):
# this could be just a temporary fix
if os.path.isfile(self.path_to_merged_bam):
print "%s already exists. Not making it again."%self.path_to_merged_bam
else:
print "Sample %s is merging the following runs: %s"%(self.sample_name, self.bams_to_merge)
merge_command = "java -jar /opt/picard/picard-tools-current/MergeSamFiles.jar "
# Add each run's bam file to mergeJob.sh
for bam in self.bams_to_merge:
if not os.path.isfile(bam) or bam[-4:] != ".bam":
print "ERROR: the bam file '%s' does not exist!"%bam
sys.exit(4)
merge_command += "INPUT=%s "%bam
# make sure the merged_dir exists, or make it.
runCommandLine("mkdir -p %s"%self.merged_dir)
#if not os.path.isdir(merged_dir):
#print "ERROR: the output dir '%s' does not exist!"%bam
#sys.exit(4)
# Now set the output file, and then run the merge command
merge_command += " OUTPUT=%s/merged_badHeader.bam "%self.merged_dir
if runCommandLine(merge_command) != 0:
print "ERROR: %s something went wrong with merging!"%self.sample_name
sys.exit(1)
#echo "fixing header for %s/merged_badHeader.bam"
correct_header_command = "samtools view -H %s/merged_badHeader.bam > %s/merged.header.sam "%(self.merged_dir, self.merged_dir)
if runCommandLine(correct_header_command) != 0:
print "ERROR: samtools view -H failed!"
sys.exit(1)
# A better way would be to check to see if the SM tags already match. Then we would be able to use ionstats and such.
SM_check_command = "grep -Eo 'SM:[a-zA-Z0-9_&/-]*"
# NEED TO TEST THIS COMMAND. Is there anything that comes before the next : that is important?
# Change the SM: tag so that it matches for every run merged. (There should be one SM tag for each run merged)
# This was the old command. We will keep using this, and then if there are problems, we can manually correct them.
sed_command = 'sed "s/SM:[a-zA-Z0-9_&/-]*/SM:%s/" %s/merged.header.sam > %s/merged.headerCorrected.sam'%(self.sample_name, self.merged_dir, self.merged_dir)
# this updated command will change the SM tag to match everything up to the next : after the SM tag.
# this command deletes the KS: tag!! not good! I don't know why but some headers are tab delimited, and some are not it seems.
#sed_command = 'sed -E "s/SM:[^:]*:/SM:%s:/" %s/merged.header.sam > %s/merged.headerCorrected.sam'%(self.sample_name, self.merged_dir, self.merged_dir)
if runCommandLine(sed_command) != 0:
print "ERROR: sed command failed!"
sys.exit(1)
# write the new header to merged.bam
reheader_command = "samtools reheader %s/merged.headerCorrected.sam %s/merged_badHeader.bam > %s "%(self.merged_dir, self.merged_dir, self.path_to_merged_bam)
if runCommandLine(reheader_command) != 0:
print "ERROR: sed command failed!"
sys.exit(1)
# set some extra variables for the JSON file.
self.merged_json = "%s/merged.json"%self.merged_dir
# if there is already an index file from a previous merge try, delete it.
if os.path.isfile(self.path_to_merged_bam + ".bai"):
os.remove(self.path_to_merged_bam + ".bai")
# IF specified, cleanup the temporary files
#if self.cleanup:
# Need to cleanup here inorder for TVC to work. there can only be one bam file in the merged dir.
os.remove("%s/merged_badHeader.bam"%self.merged_dir)
os.remove("%s/merged.headerCorrected.sam"%self.merged_dir)
os.remove("%s/merged.header.sam"%self.merged_dir)
print "%s finished merging "%self.merged_dir
# Update the final merged run status
def update_merged_run_status(self, run, merged_perc_avail_bases=0):
pass_fail_merged_status = 'pass'
run_json = json.load(open(run))
if run_json['run_type'] == 'germline':
merged_perc_aval_bases = run_json['run_data']['amp_cov']
print merged_perc_avail_bases, self.sample_json['analysis']['settings']['cutoffs']['merged_amp_cov']
# check to see if >90% of the bases are shared between the tumor normal comparison
if 'merged_amp_cov' in self.sample_json['analysis']['settings']['cutoffs'] and merged_perc_avail_bases != '':
if merged_perc_avail_bases < self.sample_json['analysis']['settings']['cutoffs']['merged_amp_cov']:
pass_fail_merged_status = 'REQUEUE'
# write the final statuses here
run_json['pass_fail_merged_status'] = pass_fail_merged_status
run_json['merged_perc_avail_bases'] = merged_perc_avail_bases
write_json(run, run_json)
# @param runs the runs of a sample
# @param run_name either '', 'Normal/' or 'Tumor/'
# @param pref the prefix of this type of merge. either 'normal_' 'tumor_' or ''
# @returns a list of the passing bam files to merge, and the path to the merged dir.
def check_merge(self, runs, run_name='', pref=''):
# vars to return
merge = False
self.bams_to_merge = []
self.runs_to_merge = []
# Use this count so that we won't have to write over past merges if there are multiple merges.
if 'merged_%scount'%pref not in self.sample_json:
self.sample_json['merged_%scount'%pref] = 0
# first check to see if all of the runs pass.
# Get all of the passing bam files for this sample.
pending_runs, passing_runs = self.QC_Run.get_runs_status(runs)
if len(pending_runs) != 0:
print "Not merging. After QC_runs, runs should either be 'pass' or 'fail', not 'pending'. Pending runs: ", pending_runs
elif len(passing_runs) < 1:
# if none of the runs are passing, then don't do anything.
pass
elif self.sample_json['sample_status'] != "pending_merge" and self.sample_json['sample_status'] != "merged":
# If any runs of the sample are not ready to be merged either because of 3x3 table error rate questions or other reasons, don't merge this sample.
print "%s the 'sample_status' is '%s'. Needs to be 'pending_merge' to merge the runs."%(self.sample_json['sample_name'], self.sample_json['sample_status'])
elif self.sample_json['sample_status'] == 'pending_merge':
# Merge these runs.
# First get the passing bams from the passing runs.
for run in passing_runs:
run_json = json.load(open(run))
self.bams_to_merge.append("%s/%s"%(run_json['run_folder'], run_json['analysis']['files'][0]))
self.runs_to_merge.append(run_json['run_name'])
# sort the run names
self.runs_to_merge.sort()
# If this sample has already been merged: If the runs to generate the merged bam don't match the current list:
# then delete the last created bam file and merge these runs
# else don't remerge these files
if len(self.bams_to_merge) == 1:
# There is only one run, so don't merge it. Set the "final_%sjson"%pref flag to show what the final run is
self.sample_json["final_%sjson"%pref] = run
# use the 'merged_json' flag rather than the 'final_json' flag because 'final_json' can be set by a single non-merged run.
elif 'merged_%sjson'%pref in self.sample_json and os.path.isfile(self.sample_json['merged_%sjson'%pref]):
merged_json_data = json.load(open(self.sample_json['merged_%sjson'%pref]))
# If the runs used to generate the current merged.bam file dont match the current bams_to_merge, then merge them. Otherwise don't
if merged_json_data['json_type'] == 'merged' and set(self.bams_to_merge) != set(merged_json_data['bams_used_to_merge']):
# in order to manage space, delete the last merged folder that was created.
if self.sample_json['analysis']['settings']['cleanup'] == True:
# IDEA delete the entire folder? Or just the bam file?
merged_bam = "%s/%s"%(merged_json_data['run_folder'], merged_json_data['analysis']['files'][0])
print " Deleting the old merged bam file: %s"%merged_bam
os.remove(merged_bam)
# Add one to the merged_count
self.sample_json['merged_%scount'%pref] += 1
# set new path to the merged_json
self.merged_dir = "%s/%sMerged_%d"%(self.sample_json['sample_folder'], run_name, self.sample_json['merged_%scount'%pref])
merge = True
else:
# Don't merge these runs because they've already been merged.
print "%s the runs: '%s' have already been merged"%(self.sample_json['sample_name'], self.bams_to_merge)
else:
# Merge these runs
self.merged_dir = "%s/%sMerged"%(self.sample_json['sample_folder'], run_name)
# Add one to the merged_count
self.sample_json['merged_%scount'%pref] += 1
merge = True
return merge
# merge the runs of a sample
# @param runs the bam files to merge
# @param merged_dir the ouptut_dir in which to place the merged bam file
# @param pref the prefix (either '', 'normal_', or 'tumor')
# @param run_type either germline, normal, or tumor.
# @param run_name either Merged, Normal_Merged or Tumor_Merged. Used for the titles of the 3x3 tables.
def merge_runs(self, run_type, run_name='', pref=''):
# if the file already exists, then merging must have finished, and don't merge again.
self.merged_json = "%s/merged.json"%self.merged_dir
if os.path.isfile(self.merged_json):
print "%s already exists so not merging the bam files again"%self.merged_json
else:
self.sample_name = self.sample_json['sample_name']
# get today's date to format the mreged.bam file name
curr_date = datetime.date.today()
# the name follows this format: A_227_Tumor_Merged_02152015
run_name = "%s_%sMerged_%02d%02d%s"%(self.sample_name, run_name, curr_date.month, curr_date.day, curr_date.year)
merged_bam = "%s.bam"%(run_name)
self.path_to_merged_bam = "%s/%s"%(self.merged_dir, merged_bam)
self.merge()
# now set the json files
# create the merged_bam's json file here so that the merger.py script can run on its own if necessary.
merged_json = {
'analysis': {
'files': [merged_bam]
},
'bams_used_to_merge':self.bams_to_merge,
'sample_name': self.sample_name,
'merged_bam': self.path_to_merged_bam,
'json_file': self.merged_json,
"json_type": "merged",
"pass_fail_status": "pending",
"project": self.sample_json['project'],
"run_folder": self.merged_dir,
"run_name": run_name,
"run_num": self.sample_json['merged_%scount'%pref],
"run_type": run_type,
"runs_used_to_merge": ', '.join(self.runs_to_merge),
"sample": self.sample_json['sample_name'],
"sample_folder": self.sample_json['sample_folder'],
"sample_json": self.sample_json['json_file']
}
#write new json file
write_json(self.merged_json, merged_json)
# QC the merged run.
self.QC_Run.runTVC_COV(self.merged_json, pref)
self.QC_Run.getRunInfo(self.merged_json, pref)
# Update the merge pass/fail status based on the metrics gathered by QC_getRunInfo.sh
self.QC_Run.update_run_status(self.merged_json, 1)
# Also store the path to this merged bam file in the sample's json file. Not really necessary, but it seems like a good idea.
#if 'merged' not in self.sample_json['analysis']['files']:
# self.sample_json['analysis']['files']['merged'] = {}
#self.sample_json['analysis']['files']['merged']['%sbam'%pref] = merger.path_to_merged_bam
# store the path to this merged bam folder in the sample's json file.
#self.sample_json['merged_%sjson'%pref] = merged_dir
# If the merge_json passes the cutoffs, set it as the final_json
merge_json = json.load(open(self.merged_json))
# add the path to this merge even if it doesn't pass
self.sample_json["merged_%sjson"%pref] = self.merged_json
if merge_json['pass_fail_status'] == 'pass':
# Add a path to the final merged_json
self.sample_json["final_%sjson"%pref] = self.merged_json
# write the modified sample_json file
write_json(self.sample_json['json_file'], self.sample_json)
# If we need this script to run on its own, update it when it is needed
#if __name__ == '__main__':
#
# # set up the option parser
# parser = OptionParser()
#
# # add the options to parse
# parser.add_option('-j', '--json', dest='json', help='The samples json file. Will be used to get the passing bams.')
# parser.add_option('-o', '--merged_dir', dest='output', help='The output file. If no output file is specified, output will be written to the screen')
# parser.add_option('-s', '--sample_name', dest='sample', help='The name of the sample. Will be used to fix the SM tag of the merged BAM file')
# parser.add_option('-b', '--bams', dest='bams', action='append', help='Use a -b for for each bam to include in merging')
# parser.add_option('-c', '--cleanup', dest='cleanup', action='store_true', help='option to cleanup the temporary files used in merging and such.')
#
# (options, args) = parser.parse_args()
#
# if options.json and (not options.output and not options.sample and not options.bams):
# Merger(options.json)
# # I don't have time to implement these other options yet...
# #elif not options.json and (options.output and options.sample and options.bams):
## merger = Merger()
## merger.merge()
## Merger(options.bams, options.output, options.sample)
# else:
# print "USAGE_ERROR: -j or (-o, -s and -b) are required. If the json file is provided, do not provide the other options. If the other options are provided, do not provide a json file."
# print "only -j is implemented so far..."
# parser.print_help()
# sys.exit(1)
#
``` |
[
{
"content": "Here is the source code:\n```python\n#!/usr/bin/env python3\n\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import *\nfrom test_framework.script import *\nfrom test_framework.mininode import *\nfrom test_framework.berycoin import *\nfrom test_framework.a... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n#!/usr/bin/env python3\n\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import *\nfrom test_framework.script import *\nfrom test_framework.mininode import *\nfrom test_framework.berycoin import *\nfrom ... | ```python
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.berycoin import *
from test_framework.address import *
from test_framework.blocktools import *
import time
import io
class BerycoinSpendOpCallTest(BitcoinTestFramework):
    """Check that a plain (non-contract) transaction cannot spend an OP_CALL
    output, i.e. funds held by a contract: a hand-built block containing such
    a spend must be rejected by the node (chain height must not advance)."""

    def set_test_params(self):
        # Single node on a fresh regtest chain.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        # Mine enough blocks so coinbase outputs mature and funds are spendable.
        self.nodes[0].generate(10+COINBASE_MATURITY)
        # Create a new contract that can receive funds
        """
        pragma solidity ^0.4.12;
        contract Test {
            function () payable {}
        }
        """
        contract_bytecode = "60606040523415600e57600080fd5b5b603580601c6000396000f30060606040525b5b5b0000a165627a7a723058202a205a0473a338a161903e98bd0920e9c01b9ab0a8f94f8f19028c49733fb60d0029"
        first_contract_address = self.nodes[0].createcontract(contract_bytecode)['address']
        self.nodes[0].generate(1)
        # Send 100000 berycoin to the contract
        self.nodes[0].sendtocontract(first_contract_address, "00", 100000)['txid']
        blockhash = self.nodes[0].generate(1)[0]
        prev_block = self.nodes[0].getblock(blockhash)
        # Extract the transaction which will be the prevout to spend the contract's funds later on
        op_call_txid = prev_block['tx'][-1]
        # Hand-build a block on top of the tip whose only extra transaction
        # tries to spend the contract's output with an empty scriptSig.
        block = create_block(int(prev_block['hash'], 16), create_coinbase(prev_block['height']+1), prev_block['time']+1)
        block.hashStateRoot = int(prev_block['hashStateRoot'], 16)
        block.hashUTXORoot = int(prev_block['hashUTXORoot'], 16)
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(op_call_txid, 16), 0), scriptSig=CScript([]))]
        tx.vout = [CTxOut(int(100000*COIN), scriptPubKey=CScript([OP_TRUE]))]
        block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Submitting the block must NOT advance the chain: the spend is invalid.
        block_count = self.nodes[0].getblockcount()
        ret = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize()))  # NOTE(review): return value is unchecked
        assert_equal(self.nodes[0].getblockcount(), block_count)
# Run the test when executed directly as a script.
if __name__ == '__main__':
    BerycoinSpendOpCallTest().main()
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not\n# use this file except in compliance wit... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not\n# use this file except i... | ```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
See the [README.md] file for more information.
"""
import io
import sys
import logging
import pathlib
import argparse
# Allows calling the script as a standalone utility
# See: https://github.com/mjpost/sacrebleu/issues/86
if __package__ is None and __name__ == '__main__':
parent = pathlib.Path(__file__).absolute().parents[1]
sys.path.insert(0, str(parent))
__package__ = 'sacrebleu'
from .tokenizers import TOKENIZERS, DEFAULT_TOKENIZER
from .dataset import DATASETS, DOMAINS, COUNTRIES, SUBSETS
from .metrics import METRICS
from .utils import smart_open, filter_subset, get_available_origlangs, SACREBLEU_DIR
from .utils import get_langpairs_for_testset, get_available_testsets
from .utils import print_test_set, get_reference_files, download_test_set
from . import __version__ as VERSION
# Module-level logger shared by the CLI helpers below.
sacrelogger = logging.getLogger('sacrebleu')

try:
    # SIGPIPE is not available on Windows machines, throwing an exception.
    from signal import SIGPIPE

    # If SIGPIPE is available, change behaviour to default instead of ignore.
    # With SIG_DFL, piping output into e.g. `head` terminates the process
    # quietly instead of raising BrokenPipeError.
    from signal import signal, SIG_DFL
    signal(SIGPIPE, SIG_DFL)
except ImportError:
    # Non-POSIX platform: continue without the signal tweak.
    sacrelogger.warning('Could not import signal.SIGPIPE (this is expected on Windows machines)')
def parse_args():
    """Build the sacreBLEU command-line interface and parse ``sys.argv``.

    Returns the parsed :class:`argparse.Namespace`. All option defaults and
    help strings live here, so ``--help`` is the authoritative usage text;
    ``main()`` performs the cross-option validation.
    """
    arg_parser = argparse.ArgumentParser(
        description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\n'
                    'Quick usage: score your detokenized output against WMT\'14 EN-DE:\n'
                    '    cat output.detok.de | sacrebleu -t wmt14 -l en-de',
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('--citation', '--cite', default=False, action='store_true',
                            help='dump the bibtex citation and quit.')
    arg_parser.add_argument('--list', default=False, action='store_true',
                            help='print a list of all available test sets.')
    arg_parser.add_argument('--test-set', '-t', type=str, default=None,
                            help='the test set to use (see also --list) or a comma-separated list of test sets to be concatenated')
    arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None,
                            help='source-target language pair (2-char ISO639-1 codes)')
    arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None,
                            help='use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation')
    arg_parser.add_argument('--subset', dest='subset', default=None,
                            help='use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)')
    arg_parser.add_argument('--download', type=str, default=None,
                            help='download a test set and quit')
    arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=str, default=None,
                            help='output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit')

    # I/O related arguments
    arg_parser.add_argument('--input', '-i', type=str, default='-',
                            help='Read input from a file instead of STDIN')
    arg_parser.add_argument('refs', nargs='*', default=[],
                            help='optional list of references (for backwards-compatibility with older scripts)')
    arg_parser.add_argument('--num-refs', '-nr', type=int, default=1,
                            help='Split the reference stream on tabs, and expect this many references. Default: %(default)s.')
    arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',
                            help='open text files with specified encoding (default: %(default)s)')

    # Metric selection
    arg_parser.add_argument('--metrics', '-m', choices=METRICS.keys(), nargs='+', default=['bleu'],
                            help='metrics to compute (default: bleu)')
    arg_parser.add_argument('--sentence-level', '-sl', action='store_true', help='Output metric on each sentence.')

    # BLEU-related arguments
    arg_parser.add_argument('-lc', action='store_true', default=False, help='Use case-insensitive BLEU (default: False)')
    arg_parser.add_argument('--smooth-method', '-s', choices=METRICS['bleu'].SMOOTH_DEFAULTS.keys(), default='exp',
                            help='smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none')
    arg_parser.add_argument('--smooth-value', '-sv', type=float, default=None,
                            help='The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'.format(
                                METRICS['bleu'].SMOOTH_DEFAULTS['floor'], METRICS['bleu'].SMOOTH_DEFAULTS['add-k']))
    arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(), default=None,
                            help='Tokenization method to use for BLEU. If not provided, defaults to `zh` for Chinese, `mecab` for Japanese and `mteval-v13a` otherwise.')
    arg_parser.add_argument('--force', default=False, action='store_true',
                            help='insist that your tokenized input is actually detokenized')

    # ChrF-related arguments
    arg_parser.add_argument('--chrf-order', type=int, default=METRICS['chrf'].ORDER,
                            help='chrf character order (default: %(default)s)')
    arg_parser.add_argument('--chrf-beta', type=int, default=METRICS['chrf'].BETA,
                            help='chrf BETA parameter (default: %(default)s)')
    arg_parser.add_argument('--chrf-whitespace', action='store_true', default=False,
                            help='include whitespace in chrF calculation (default: %(default)s)')

    # Reporting related arguments
    arg_parser.add_argument('--quiet', '-q', default=False, action='store_true',
                            help='suppress informative output')
    arg_parser.add_argument('--short', default=False, action='store_true',
                            help='produce a shorter (less human readable) signature')
    arg_parser.add_argument('--score-only', '-b', default=False, action='store_true',
                            help='output only the BLEU score')
    arg_parser.add_argument('--width', '-w', type=int, default=1,
                            help='floating point width (default: %(default)s)')
    arg_parser.add_argument('--detail', '-d', default=False, action='store_true',
                            help='print extra information (split test sets based on origlang)')
    arg_parser.add_argument('-V', '--version', action='version',
                            version='%(prog)s {}'.format(VERSION))
    args = arg_parser.parse_args()
    return args
def main():
    """Command-line entry point.

    Validates the argument combinations from :func:`parse_args`, loads the
    reference and hypothesis streams, then computes and prints the requested
    metric scores. Exits the process directly on every error path.
    """
    args = parse_args()

    # Explicitly set the encoding
    sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=True, newline="\n")
    sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=True)

    if not args.quiet:
        logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s')

    # "Do one thing and quit" modes: --download, --list, --citation, --echo.
    if args.download:
        download_test_set(args.download, args.langpair)
        sys.exit(0)

    if args.list:
        if args.test_set:
            print(' '.join(get_langpairs_for_testset(args.test_set)))
        else:
            print('The available test sets are:')
            for testset in get_available_testsets():
                print('%30s: %s' % (testset, DATASETS[testset].get('description', '').strip()))
        sys.exit(0)

    if args.sentence_level and len(args.metrics) > 1:
        sacrelogger.error('Only one metric can be used with Sentence-level reporting.')
        sys.exit(1)

    if args.citation:
        if not args.test_set:
            sacrelogger.error('I need a test set (-t).')
            sys.exit(1)
        for test_set in args.test_set.split(','):
            if 'citation' not in DATASETS[test_set]:
                sacrelogger.error('No citation found for %s', test_set)
            else:
                print(DATASETS[test_set]['citation'])
        sys.exit(0)

    # --num-refs only works with a single external tab-delimited reference file.
    if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1):
        sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.')
        sacrelogger.error('You can only use it with externaly-provided references, however (i.e., not with `-t`),')
        sacrelogger.error('and you cannot then provide multiple reference files.')
        sys.exit(1)

    if args.test_set is not None:
        for test_set in args.test_set.split(','):
            if test_set not in DATASETS:
                sacrelogger.error('Unknown test set "%s"', test_set)
                sacrelogger.error('Please run with --list to see the available test sets.')
                sys.exit(1)

    # Exactly one of (-t) or a list of references, and -t requires -l.
    if args.test_set is None:
        if len(args.refs) == 0:
            sacrelogger.error('I need either a predefined test set (-t) or a list of references')
            sacrelogger.error(get_available_testsets())
            sys.exit(1)
    elif len(args.refs) > 0:
        sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references')
        sys.exit(1)
    elif args.langpair is None:
        sacrelogger.error('I need a language pair (-l).')
        sys.exit(1)
    else:
        for test_set in args.test_set.split(','):
            langpairs = get_langpairs_for_testset(test_set)
            if args.langpair not in langpairs:
                sacrelogger.error('No such language pair "%s"', args.langpair)
                sacrelogger.error('Available language pairs for test set "%s": %s', test_set,
                                  ', '.join(langpairs))
                sys.exit(1)

    if args.echo:
        if args.langpair is None or args.test_set is None:
            sacrelogger.warning("--echo requires a test set (--t) and a language pair (-l)")
            sys.exit(1)
        for test_set in args.test_set.split(','):
            print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset)
        sys.exit(0)

    if args.test_set is not None and args.tokenize == 'none':
        sacrelogger.warning("You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\n"
                            "your own reference tokenization. Published numbers will not be comparable with other papers.\n")

    if 'ter' in args.metrics and args.tokenize is not None:
        logging.warning("Your setting of --tokenize will be ignored when "
                        "computing TER")

    # Internal tokenizer settings
    if args.tokenize is None:
        # set default
        if args.langpair is not None and args.langpair.split('-')[1] == 'zh':
            args.tokenize = 'zh'
        elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':
            args.tokenize = 'ja-mecab'
        else:
            args.tokenize = DEFAULT_TOKENIZER

    if args.langpair is not None and 'bleu' in args.metrics:
        if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':
            sacrelogger.warning('You should also pass "--tok zh" when scoring Chinese...')
        if args.langpair.split('-')[1] == 'ja' and not args.tokenize.startswith('ja-'):
            sacrelogger.warning('You should also pass "--tok ja-mecab" when scoring Japanese...')

    # concat_ref_files is a list of list of reference filenames, for example:
    # concat_ref_files = [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]]
    if args.test_set is None:
        concat_ref_files = [args.refs]
    else:
        concat_ref_files = []
        for test_set in args.test_set.split(','):
            ref_files = get_reference_files(test_set, args.langpair)
            if len(ref_files) == 0:
                sacrelogger.warning('No references found for test set {}/{}.'.format(test_set, args.langpair))
            concat_ref_files.append(ref_files)

    # Read references: full_refs[i] is the i-th reference stream across all
    # concatenated test sets.
    full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))]
    for ref_files in concat_ref_files:
        for refno, ref_file in enumerate(ref_files):
            for lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1):
                if args.num_refs != 1:
                    # Tab-delimited multi-reference file: split each line.
                    splits = line.rstrip().split(sep='\t', maxsplit=args.num_refs-1)
                    if len(splits) != args.num_refs:
                        sacrelogger.error('FATAL: line {}: expected {} fields, but found {}.'.format(lineno, args.num_refs, len(splits)))
                        sys.exit(17)
                    for refno, split in enumerate(splits):
                        full_refs[refno].append(split)
                else:
                    full_refs[refno].append(line)

    # Decide on the number of final references, override the argument
    args.num_refs = len(full_refs)

    # Read hypotheses stream
    if args.input == '-':
        inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding)
    else:
        inputfh = smart_open(args.input, encoding=args.encoding)
    full_system = inputfh.readlines()

    # Filter sentences according to a given origlang
    system, *refs = filter_subset(
        [full_system, *full_refs], args.test_set, args.langpair, args.origlang, args.subset)

    if len(system) == 0:
        message = 'Test set %s contains no sentence' % args.test_set
        if args.origlang is not None or args.subset is not None:
            message += ' with'
            message += '' if args.origlang is None else ' origlang=' + args.origlang
            message += '' if args.subset is None else ' subset=' + args.subset
        sacrelogger.error(message)
        sys.exit(1)

    # Create metric inventory, let each metric consume relevant args from argparse
    metrics = [METRICS[met](args) for met in args.metrics]

    # Handle sentence level and quit
    if args.sentence_level:
        # one metric in use for sentence-level
        metric = metrics[0]
        for output, *references in zip(system, *refs):
            score = metric.sentence_score(output, references)
            print(score.format(args.width, args.score_only, metric.signature))
        sys.exit(0)

    # Else, handle system level
    for metric in metrics:
        try:
            score = metric.corpus_score(system, refs)
        except EOFError:
            sacrelogger.error('The input and reference stream(s) were of different lengths.')
            if args.test_set is not None:
                sacrelogger.error('\nThis could be a problem with your system output or with sacreBLEU\'s reference database.\n'
                                  'If the latter, you can clean out the references cache by typing:\n'
                                  '\n'
                                  '    rm -r %s/%s\n'
                                  '\n'
                                  'They will be downloaded automatically again the next time you run sacreBLEU.', SACREBLEU_DIR,
                                  args.test_set)
            sys.exit(1)
        else:
            print(score.format(args.width, args.score_only, metric.signature))

    # Per-origlang / per-subset breakdown of the scores.
    if args.detail:
        width = args.width
        sents_digits = len(str(len(full_system)))
        origlangs = args.origlang if args.origlang else get_available_origlangs(args.test_set, args.langpair)
        for origlang in origlangs:
            subsets = [None]
            if args.subset is not None:
                subsets += [args.subset]
            elif all(t in SUBSETS for t in args.test_set.split(',')):
                subsets += COUNTRIES + DOMAINS
            for subset in subsets:
                system, *refs = filter_subset([full_system, *full_refs], args.test_set, args.langpair, origlang, subset)
                if len(system) == 0:
                    continue
                if subset in COUNTRIES:
                    subset_str = '%20s' % ('country=' + subset)
                elif subset in DOMAINS:
                    subset_str = '%20s' % ('domain=' + subset)
                else:
                    subset_str = '%20s' % ''
                for metric in metrics:
                    # FIXME: handle this in metrics
                    if metric.name == 'bleu':
                        _refs = refs
                    elif metric.name == 'chrf':
                        _refs = refs[0]
                    score = metric.corpus_score(system, _refs)
                    print('origlang={} {}: sentences={:{}} {}={:{}.{}f}'.format(
                        origlang, subset_str, len(system), sents_digits,
                        score.prefix, score.score, width+4, width))


if __name__ == '__main__':
    main()
``` |
[
{
"content": "Here is some code:\n```python\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-present Rapptz\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, i... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-present Rapptz\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software withou... | ```python
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import logging
import signal
import sys
import traceback
from typing import Any, Generator, List, Optional, Sequence, TYPE_CHECKING, TypeVar, Union
import aiohttp
from .user import User
from .invite import Invite
from .template import Template
from .widget import Widget
from .guild import Guild
from .channel import _channel_factory
from .enums import ChannelType
from .mentions import AllowedMentions
from .errors import *
from .enums import Status, VoiceRegion
from .flags import ApplicationFlags
from .gateway import *
from .activity import BaseActivity, create_activity
from .voice_client import VoiceClient
from .http import HTTPClient
from .state import ConnectionState
from . import utils
from .object import Object
from .backoff import ExponentialBackoff
from .webhook import Webhook
from .iterators import GuildIterator
from .appinfo import AppInfo
from .ui.view import View
from .stage_instance import StageInstance
__all__ = (
'Client',
)
if TYPE_CHECKING:
from .abc import SnowflakeTime
log = logging.getLogger(__name__)
def _cancel_tasks(loop):
    """Cancel every unfinished task on *loop*, wait for them to wind down,
    and hand any exception raised during teardown to the loop's exception
    handler."""
    pending = [t for t in asyncio.all_tasks(loop=loop) if not t.done()]
    if not pending:
        return

    log.info('Cleaning up after %d tasks.', len(pending))
    for t in pending:
        t.cancel()

    # return_exceptions keeps one failing task from aborting the wait on the rest.
    loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
    log.info('All tasks finished cancelling.')

    for t in pending:
        if t.cancelled():
            continue
        exc = t.exception()
        if exc is not None:
            loop.call_exception_handler({
                'message': 'Unhandled exception during Client.run shutdown.',
                'exception': exc,
                'task': t
            })
def _cleanup_loop(loop):
    # Best-effort teardown of *loop*: cancel whatever is still running and
    # flush pending async generators, then close the loop even if that
    # cleanup itself raised.
    try:
        _cancel_tasks(loop)
        loop.run_until_complete(loop.shutdown_asyncgens())
    finally:
        log.info('Closing the event loop.')
        loop.close()
class Client:
r"""Represents a client connection that connects to Discord.
This class is used to interact with the Discord WebSocket and API.
A number of options can be passed to the :class:`Client`.
Parameters
-----------
max_messages: Optional[:class:`int`]
The maximum number of messages to store in the internal message cache.
This defaults to ``1000``. Passing in ``None`` disables the message cache.
.. versionchanged:: 1.3
Allow disabling the message cache and change the default size to ``1000``.
loop: Optional[:class:`asyncio.AbstractEventLoop`]
The :class:`asyncio.AbstractEventLoop` to use for asynchronous operations.
Defaults to ``None``, in which case the default event loop is used via
:func:`asyncio.get_event_loop()`.
connector: :class:`aiohttp.BaseConnector`
The connector to use for connection pooling.
proxy: Optional[:class:`str`]
Proxy URL.
proxy_auth: Optional[:class:`aiohttp.BasicAuth`]
An object that represents proxy HTTP Basic Authorization.
shard_id: Optional[:class:`int`]
Integer starting at ``0`` and less than :attr:`.shard_count`.
shard_count: Optional[:class:`int`]
The total number of shards.
application_id: :class:`int`
The client's application ID.
intents: :class:`Intents`
The intents that you want to enable for the session. This is a way of
disabling and enabling certain gateway events from triggering and being sent.
If not given, defaults to a regularly constructed :class:`Intents` class.
.. versionadded:: 1.5
member_cache_flags: :class:`MemberCacheFlags`
Allows for finer control over how the library caches members.
If not given, defaults to cache as much as possible with the
currently selected intents.
.. versionadded:: 1.5
chunk_guilds_at_startup: :class:`bool`
Indicates if :func:`.on_ready` should be delayed to chunk all guilds
at start-up if necessary. This operation is incredibly slow for large
amounts of guilds. The default is ``True`` if :attr:`Intents.members`
is ``True``.
.. versionadded:: 1.5
status: Optional[:class:`.Status`]
A status to start your presence with upon logging on to Discord.
activity: Optional[:class:`.BaseActivity`]
An activity to start your presence with upon logging on to Discord.
allowed_mentions: Optional[:class:`AllowedMentions`]
Control how the client handles mentions by default on every message sent.
.. versionadded:: 1.4
heartbeat_timeout: :class:`float`
The maximum numbers of seconds before timing out and restarting the
WebSocket in the case of not receiving a HEARTBEAT_ACK. Useful if
processing the initial packets take too long to the point of disconnecting
you. The default timeout is 60 seconds.
guild_ready_timeout: :class:`float`
The maximum number of seconds to wait for the GUILD_CREATE stream to end before
preparing the member cache and firing READY. The default timeout is 2 seconds.
.. versionadded:: 1.4
assume_unsync_clock: :class:`bool`
Whether to assume the system clock is unsynced. This applies to the ratelimit handling
code. If this is set to ``True``, the default, then the library uses the time to reset
a rate limit bucket given by Discord. If this is ``False`` then your system clock is
used to calculate how long to sleep for. If this is set to ``False`` it is recommended to
sync your system clock to Google's NTP server.
.. versionadded:: 1.3
Attributes
-----------
ws
The websocket gateway the client is currently connected to. Could be ``None``.
loop: :class:`asyncio.AbstractEventLoop`
The event loop that the client uses for asynchronous operations.
"""
    def __init__(self, *, loop=None, **options):
        # Gateway websocket; populated later by connect().
        self.ws = None
        self.loop = asyncio.get_event_loop() if loop is None else loop
        # event name -> list of (future, condition) pairs consumed by dispatch().
        self._listeners = {}
        self.shard_id = options.get('shard_id')
        self.shard_count = options.get('shard_count')

        connector = options.pop('connector', None)
        proxy = options.pop('proxy', None)
        proxy_auth = options.pop('proxy_auth', None)
        unsync_clock = options.pop('assume_unsync_clock', True)
        self.http = HTTPClient(connector, proxy=proxy, proxy_auth=proxy_auth, unsync_clock=unsync_clock, loop=self.loop)

        # Internal event handlers/hooks handed to the connection state.
        self._handlers = {
            'ready': self._handle_ready
        }

        self._hooks = {
            'before_identify': self._call_before_identify_hook
        }

        # Remaining options are forwarded verbatim to the connection state.
        self._connection = self._get_state(**options)
        self._connection.shard_count = self.shard_count
        self._closed = False
        self._ready = asyncio.Event()
        # Give the state callables to reach back to the websocket and client.
        self._connection._get_websocket = self._get_websocket
        self._connection._get_client = lambda: self

        if VoiceClient.warn_nacl:
            # Warn at most once per process about missing voice support.
            VoiceClient.warn_nacl = False
            log.warning("PyNaCl is not installed, voice will NOT be supported")
# internals
    def _get_websocket(self, guild_id=None, *, shard_id=None):
        # Non-sharded client: every guild shares the single gateway connection,
        # so the guild_id/shard_id hints are ignored here.
        return self.ws
    def _get_state(self, **options):
        # Build the ConnectionState wired to this client's dispatch, internal
        # handlers/hooks, HTTP client and loop; extra options pass through.
        return ConnectionState(dispatch=self.dispatch, handlers=self._handlers,
                               hooks=self._hooks, http=self.http, loop=self.loop, **options)
    def _handle_ready(self):
        # READY handler: unblocks anything waiting on the internal ready event.
        self._ready.set()
@property
def latency(self):
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This could be referred to as the Discord WebSocket protocol latency.
"""
ws = self.ws
return float('nan') if not ws else ws.latency
def is_ws_ratelimited(self):
""":class:`bool`: Whether the websocket is currently rate limited.
This can be useful to know when deciding whether you should query members
using HTTP or via the gateway.
.. versionadded:: 1.6
"""
if self.ws:
return self.ws.is_ratelimited()
return False
@property
def user(self):
"""Optional[:class:`.ClientUser`]: Represents the connected client. ``None`` if not logged in."""
return self._connection.user
@property
def guilds(self):
"""List[:class:`.Guild`]: The guilds that the connected client is a member of."""
return self._connection.guilds
@property
def emojis(self):
"""List[:class:`.Emoji`]: The emojis that the connected client has."""
return self._connection.emojis
    @property
    def cached_messages(self):
        """Sequence[:class:`.Message`]: Read-only list of messages the connected client has cached.

        .. versionadded:: 1.1
        """
        # Wrap in a read-only proxy; the state's message cache may be None
        # when caching is disabled, hence the `or []` fallback.
        return utils.SequenceProxy(self._connection._messages or [])
@property
def private_channels(self):
"""List[:class:`.abc.PrivateChannel`]: The private channels that the connected client is participating on.
.. note::
This returns only up to 128 most recent private channels due to an internal working
on how Discord deals with private channels.
"""
return self._connection.private_channels
@property
def voice_clients(self):
"""List[:class:`.VoiceProtocol`]: Represents a list of voice connections.
These are usually :class:`.VoiceClient` instances.
"""
return self._connection.voice_clients
@property
def application_id(self):
"""Optional[:class:`int`]: The client's application ID.
If this is not passed via ``__init__`` then this is retrieved
through the gateway when an event contains the data. Usually
after :func:`~discord.on_connect` is called.
"""
return self._connection.application_id
    @property
    def application_flags(self) -> ApplicationFlags:
        """:class:`~discord.ApplicationFlags`: The client's application flags.

        .. versionadded: 2.0
        """
        # Straight delegation to the connection state cache.
        return self._connection.application_flags  # type: ignore
def is_ready(self):
""":class:`bool`: Specifies if the client's internal cache is ready for use."""
return self._ready.is_set()
    async def _run_event(self, coro, event_name, *args, **kwargs):
        # Await one event handler. Cancellation is swallowed silently; any
        # other failure is routed to on_error, whose own cancellation is
        # likewise swallowed so shutdown never trips over event handlers.
        try:
            await coro(*args, **kwargs)
        except asyncio.CancelledError:
            pass
        except Exception:
            try:
                await self.on_error(event_name, *args, **kwargs)
            except asyncio.CancelledError:
                pass
    def _schedule_event(self, coro, event_name, *args, **kwargs):
        # Fire-and-forget: errors are handled inside _run_event itself.
        wrapped = self._run_event(coro, event_name, *args, **kwargs)
        # Schedules the task
        return asyncio.create_task(wrapped, name=f'discord.py: {event_name}')
    def dispatch(self, event, *args, **kwargs):
        # Fire `event`: first resolve any one-shot listener futures registered
        # for it, then schedule the user-defined on_<event> coroutine if any.
        log.debug('Dispatching event %s', event)
        method = 'on_' + event

        listeners = self._listeners.get(event)
        if listeners:
            removed = []
            for i, (future, condition) in enumerate(listeners):
                if future.cancelled():
                    removed.append(i)
                    continue

                try:
                    result = condition(*args)
                except Exception as exc:
                    # A broken predicate fails the waiter rather than dispatch.
                    future.set_exception(exc)
                    removed.append(i)
                else:
                    if result:
                        # Resolve with None / the single arg / the arg tuple.
                        if len(args) == 0:
                            future.set_result(None)
                        elif len(args) == 1:
                            future.set_result(args[0])
                        else:
                            future.set_result(args)
                        removed.append(i)

            if len(removed) == len(listeners):
                self._listeners.pop(event)
            else:
                # Delete in reverse so earlier indices stay valid.
                for idx in reversed(removed):
                    del listeners[idx]

        try:
            coro = getattr(self, method)
        except AttributeError:
            # No handler defined for this event; nothing more to do.
            pass
        else:
            self._schedule_event(coro, method, *args, **kwargs)
    async def on_error(self, event_method, *args, **kwargs):
        """|coro|

        The default error handler provided by the client.

        By default this prints to :data:`sys.stderr` however it could be
        overridden to have a different implementation.
        Check :func:`~discord.on_error` for more details.
        """
        print(f'Ignoring exception in {event_method}', file=sys.stderr)
        # print_exc() reports the exception currently being handled.
        traceback.print_exc()
# hooks
    async def _call_before_identify_hook(self, shard_id, *, initial=False):
        # This hook is an internal hook that actually calls the public one.
        # It allows the library to have its own hook without stepping on the
        # toes of those who need to override their own hook.
        await self.before_identify_hook(shard_id, initial=initial)
async def before_identify_hook(self, shard_id, *, initial=False):
"""|coro|
A hook that is called before IDENTIFYing a session. This is useful
if you wish to have more control over the synchronization of multiple
IDENTIFYing clients.
The default implementation sleeps for 5 seconds.
.. versionadded:: 1.4
Parameters
------------
shard_id: :class:`int`
The shard ID that requested being IDENTIFY'd
initial: :class:`bool`
Whether this IDENTIFY is the first initial IDENTIFY.
"""
if not initial:
await asyncio.sleep(5.0)
# login state management
    async def login(self, token):
        """|coro|

        Logs in the client with the specified credentials.

        Parameters
        -----------
        token: :class:`str`
            The authentication token. Do not prefix this token with
            anything as the library will do it for you.

        Raises
        ------
        :exc:`.LoginFailure`
            The wrong credentials are passed.
        :exc:`.HTTPException`
            An unknown HTTP related error occurred,
            usually when it isn't 200 or the known incorrect credentials
            passing status code.
        """
        log.info('logging in using static token')
        # strip() tolerates surrounding whitespace/newlines in the token.
        await self.http.static_login(token.strip())
    async def connect(self, *, reconnect=True):
        """|coro|
        Creates a websocket connection and lets the websocket listen
        to messages from Discord. This is a loop that runs the entire
        event system and miscellaneous aspects of the library. Control
        is not resumed until the WebSocket connection is terminated.
        Parameters
        -----------
        reconnect: :class:`bool`
            If we should attempt reconnecting, either due to internet
            failure or a specific failure on Discord's part. Certain
            disconnects that lead to bad state will not be handled (such as
            invalid sharding payloads or bad tokens).
        Raises
        -------
        :exc:`.GatewayNotFound`
            If the gateway to connect to Discord is not found. Usually if this
            is thrown then there is a Discord API outage.
        :exc:`.ConnectionClosed`
            The websocket connection has been terminated.
        """
        backoff = ExponentialBackoff()
        # Parameters threaded into each (re)connect attempt; mutated below to
        # switch from a fresh IDENTIFY to a RESUME of the previous session.
        ws_params = {
            'initial': True,
            'shard_id': self.shard_id,
        }
        while not self.is_closed():
            try:
                coro = DiscordWebSocket.from_client(self, **ws_params)
                # 60s cap on establishing the websocket itself.
                self.ws = await asyncio.wait_for(coro, timeout=60.0)
                ws_params['initial'] = False
                while True:
                    await self.ws.poll_event()
            except ReconnectWebSocket as e:
                # The gateway explicitly asked us to reconnect/resume.
                log.info('Got a request to %s the websocket.', e.op)
                self.dispatch('disconnect')
                ws_params.update(sequence=self.ws.sequence, resume=e.resume, session=self.ws.session_id)
                continue
            except (OSError,
                    HTTPException,
                    GatewayNotFound,
                    ConnectionClosed,
                    aiohttp.ClientError,
                    asyncio.TimeoutError) as exc:
                self.dispatch('disconnect')
                if not reconnect:
                    await self.close()
                    if isinstance(exc, ConnectionClosed) and exc.code == 1000:
                        # clean close, don't re-raise this
                        return
                    raise
                if self.is_closed():
                    return
                # If we get connection reset by peer then try to RESUME
                if isinstance(exc, OSError) and exc.errno in (54, 10054):
                    ws_params.update(sequence=self.ws.sequence, initial=False, resume=True, session=self.ws.session_id)
                    continue
                # We should only get this when an unhandled close code happens,
                # such as a clean disconnect (1000) or a bad state (bad token, no sharding, etc)
                # sometimes, discord sends us 1000 for unknown reasons so we should reconnect
                # regardless and rely on is_closed instead
                if isinstance(exc, ConnectionClosed):
                    if exc.code == 4014:
                        raise PrivilegedIntentsRequired(exc.shard_id) from None
                    if exc.code != 1000:
                        await self.close()
                        raise
                # Exponential backoff before retrying to avoid hammering the gateway.
                retry = backoff.delay()
                log.exception("Attempting a reconnect in %.2fs", retry)
                await asyncio.sleep(retry)
                # Always try to RESUME the connection
                # If the connection is not RESUME-able then the gateway will invalidate the session.
                # This is apparently what the official Discord client does.
                ws_params.update(sequence=self.ws.sequence, resume=True, session=self.ws.session_id)
async def close(self):
"""|coro|
Closes the connection to Discord.
"""
if self._closed:
return
self._closed = True
for voice in self.voice_clients:
try:
await voice.disconnect()
except Exception:
# if an error happens during disconnects, disregard it.
pass
if self.ws is not None and self.ws.open:
await self.ws.close(code=1000)
await self.http.close()
self._ready.clear()
def clear(self):
"""Clears the internal state of the bot.
After this, the bot can be considered "re-opened", i.e. :meth:`is_closed`
and :meth:`is_ready` both return ``False`` along with the bot's internal
cache cleared.
"""
self._closed = False
self._ready.clear()
self._connection.clear()
self.http.recreate()
async def start(self, token, *, reconnect=True):
"""|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
Raises
-------
TypeError
An unexpected keyword argument was received.
"""
await self.login(token)
await self.connect(reconnect=reconnect)
    def run(self, *args, **kwargs):
        """A blocking call that abstracts away the event loop
        initialisation from you.
        If you want more control over the event loop then this
        function should not be used. Use :meth:`start` coroutine
        or :meth:`connect` + :meth:`login`.
        Roughly Equivalent to: ::
            try:
                loop.run_until_complete(start(*args, **kwargs))
            except KeyboardInterrupt:
                loop.run_until_complete(close())
                # cancel all tasks lingering
            finally:
                loop.close()
        .. warning::
            This function must be the last function to call due to the fact that it
            is blocking. That means that registration of events or anything being
            called after this function call will not execute until it returns.
        """
        loop = self.loop
        try:
            # Stop the loop gracefully on SIGINT/SIGTERM where the platform's
            # event loop supports signal handlers.
            loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
            loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
        except NotImplementedError:
            pass
        async def runner():
            try:
                await self.start(*args, **kwargs)
            finally:
                # Guarantee teardown even when start() raised.
                if not self.is_closed():
                    await self.close()
        def stop_loop_on_completion(f):
            loop.stop()
        future = asyncio.ensure_future(runner(), loop=loop)
        future.add_done_callback(stop_loop_on_completion)
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            log.info('Received signal to terminate bot and event loop.')
        finally:
            # Detach the callback before cleanup so stopping the loop during
            # cleanup cannot re-enter it.
            future.remove_done_callback(stop_loop_on_completion)
            log.info('Cleaning up tasks.')
            _cleanup_loop(loop)
        if not future.cancelled():
            try:
                return future.result()
            except KeyboardInterrupt:
                # I am unsure why this gets raised here but suppress it anyway
                return None
# properties
    def is_closed(self):
        """:class:`bool`: Indicates if the websocket connection is closed."""
        # Mirrors the _closed flag toggled by close() and clear().
        return self._closed
    @property
    def activity(self):
        """Optional[:class:`.BaseActivity`]: The activity being used upon
        logging in.
        """
        # The raw dict form lives on the connection state; rebuild the rich
        # activity object on each access.
        return create_activity(self._connection._activity)
@activity.setter
def activity(self, value):
if value is None:
self._connection._activity = None
elif isinstance(value, BaseActivity):
self._connection._activity = value.to_dict()
else:
raise TypeError('activity must derive from BaseActivity.')
    @property
    def allowed_mentions(self):
        """Optional[:class:`~discord.AllowedMentions`]: The allowed mention configuration.
        .. versionadded:: 1.4
        """
        # Stored on the connection state; may be None when unset.
        return self._connection.allowed_mentions
@allowed_mentions.setter
def allowed_mentions(self, value):
if value is None or isinstance(value, AllowedMentions):
self._connection.allowed_mentions = value
else:
raise TypeError(f'allowed_mentions must be AllowedMentions not {value.__class__!r}')
    @property
    def intents(self):
        """:class:`~discord.Intents`: The intents configured for this connection.
        .. versionadded:: 1.5
        """
        # Read-only view onto the connection state's intents.
        return self._connection.intents
# helpers/getters
    @property
    def users(self):
        """List[:class:`~discord.User`]: Returns a list of all the users the bot can see."""
        # Snapshot copy so callers can iterate safely while the cache mutates.
        return list(self._connection._users.values())
def get_channel(self, id):
"""Returns a channel with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`]]
The returned channel or ``None`` if not found.
"""
return self._connection.get_channel(id)
    def get_stage_instance(self, id) -> Optional[StageInstance]:
        """Returns a stage instance with the given stage channel ID.
        .. versionadded:: 2.0
        Parameters
        -----------
        id: :class:`int`
            The ID to search for.
        Returns
        --------
        Optional[:class:`StageInstance`]
            The stage instance, or ``None`` if not found.
        """
        # Local import — presumably avoids a circular import at module load; confirm.
        from .channel import StageChannel
        channel = self._connection.get_channel(id)
        if isinstance(channel, StageChannel):
            return channel.instance
        # Implicitly returns None when the ID is unknown or not a stage channel.
def get_guild(self, id):
"""Returns a guild with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.Guild`]
The guild or ``None`` if not found.
"""
return self._connection._get_guild(id)
def get_user(self, id):
"""Returns a user with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`~discord.User`]
The user or ``None`` if not found.
"""
return self._connection.get_user(id)
def get_emoji(self, id):
"""Returns an emoji with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.Emoji`]
The custom emoji or ``None`` if not found.
"""
return self._connection.get_emoji(id)
def get_all_channels(self):
"""A generator that retrieves every :class:`.abc.GuildChannel` the client can 'access'.
This is equivalent to: ::
for guild in client.guilds:
for channel in guild.channels:
yield channel
.. note::
Just because you receive a :class:`.abc.GuildChannel` does not mean that
you can communicate in said channel. :meth:`.abc.GuildChannel.permissions_for` should
be used for that.
Yields
------
:class:`.abc.GuildChannel`
A channel the client can 'access'.
"""
for guild in self.guilds:
yield from guild.channels
def get_all_members(self):
"""Returns a generator with every :class:`.Member` the client can see.
This is equivalent to: ::
for guild in client.guilds:
for member in guild.members:
yield member
Yields
------
:class:`.Member`
A member the client can see.
"""
for guild in self.guilds:
yield from guild.members
# listeners/waiters
async def wait_until_ready(self):
"""|coro|
Waits until the client's internal cache is all ready.
"""
await self._ready.wait()
def wait_for(self, event, *, check=None, timeout=None):
"""|coro|
Waits for a WebSocket event to be dispatched.
This could be used to wait for a user to reply to a message,
or to react to a message, or to edit a message in a self-contained
way.
The ``timeout`` parameter is passed onto :func:`asyncio.wait_for`. By default,
it does not timeout. Note that this does propagate the
:exc:`asyncio.TimeoutError` for you in case of timeout and is provided for
ease of use.
In case the event returns multiple arguments, a :class:`tuple` containing those
arguments is returned instead. Please check the
:ref:`documentation <discord-api-events>` for a list of events and their
parameters.
This function returns the **first event that meets the requirements**.
Examples
---------
Waiting for a user reply: ::
@client.event
async def on_message(message):
if message.content.startswith('$greet'):
channel = message.channel
await channel.send('Say hello!')
def check(m):
return m.content == 'hello' and m.channel == channel
msg = await client.wait_for('message', check=check)
await channel.send(f'Hello {msg.author}!')
Waiting for a thumbs up reaction from the message author: ::
@client.event
async def on_message(message):
if message.content.startswith('$thumb'):
channel = message.channel
await channel.send('Send me that \N{THUMBS UP SIGN} reaction, mate')
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '\N{THUMBS UP SIGN}'
try:
reaction, user = await client.wait_for('reaction_add', timeout=60.0, check=check)
except asyncio.TimeoutError:
await channel.send('\N{THUMBS DOWN SIGN}')
else:
await channel.send('\N{THUMBS UP SIGN}')
Parameters
------------
event: :class:`str`
The event name, similar to the :ref:`event reference <discord-api-events>`,
but without the ``on_`` prefix, to wait for.
check: Optional[Callable[..., :class:`bool`]]
A predicate to check what to wait for. The arguments must meet the
parameters of the event being waited for.
timeout: Optional[:class:`float`]
The number of seconds to wait before timing out and raising
:exc:`asyncio.TimeoutError`.
Raises
-------
asyncio.TimeoutError
If a timeout is provided and it was reached.
Returns
--------
Any
Returns no arguments, a single argument, or a :class:`tuple` of multiple
arguments that mirrors the parameters passed in the
:ref:`event reference <discord-api-events>`.
"""
future = self.loop.create_future()
if check is None:
def _check(*args):
return True
check = _check
ev = event.lower()
try:
listeners = self._listeners[ev]
except KeyError:
listeners = []
self._listeners[ev] = listeners
listeners.append((future, check))
return asyncio.wait_for(future, timeout)
# event registration
def event(self, coro):
"""A decorator that registers an event to listen to.
You can find more info about the events on the :ref:`documentation below <discord-api-events>`.
The events must be a :ref:`coroutine <coroutine>`, if not, :exc:`TypeError` is raised.
Example
---------
.. code-block:: python3
@client.event
async def on_ready():
print('Ready!')
Raises
--------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('event registered must be a coroutine function')
setattr(self, coro.__name__, coro)
log.debug('%s has successfully been registered as an event', coro.__name__)
return coro
    async def change_presence(self, *, activity=None, status=None, afk=False):
        """|coro|
        Changes the client's presence.
        Example
        ---------
        .. code-block:: python3
            game = discord.Game("with the API")
            await client.change_presence(status=discord.Status.idle, activity=game)
        Parameters
        ----------
        activity: Optional[:class:`.BaseActivity`]
            The activity being done. ``None`` if no currently active activity is done.
        status: Optional[:class:`.Status`]
            Indicates what status to change to. If ``None``, then
            :attr:`.Status.online` is used.
        afk: Optional[:class:`bool`]
            Indicates if you are going AFK. This allows the discord
            client to know how to handle push notifications better
            for you in case you are actually idle and not lying.
        Raises
        ------
        :exc:`.InvalidArgument`
            If the ``activity`` parameter is not the proper type.
        """
        # Normalize `status` into the wire string sent over the websocket,
        # keeping an enum copy for updating the local member cache below.
        if status is None:
            status = 'online'
            status_enum = Status.online
        elif status is Status.offline:
            # Self-presence expresses "offline" as 'invisible' on the wire.
            status = 'invisible'
            status_enum = Status.offline
        else:
            status_enum = status
            status = str(status)
        await self.ws.change_presence(activity=activity, status=status, afk=afk)
        # Mirror the change into each cached guild's "me" member so local
        # state agrees with what was just sent.
        for guild in self._connection.guilds:
            me = guild.me
            if me is None:
                continue
            if activity is not None:
                me.activities = (activity,)
            else:
                me.activities = ()
            me.status = status_enum
# Guild stuff
    def fetch_guilds(self, *, limit: int = 100, before: SnowflakeTime = None, after: SnowflakeTime = None) -> List[Guild]:
        """Retrieves an :class:`.AsyncIterator` that enables receiving your guilds.
        .. note::
            Using this, you will only receive :attr:`.Guild.owner`, :attr:`.Guild.icon`,
            :attr:`.Guild.id`, and :attr:`.Guild.name` per :class:`.Guild`.
        .. note::
            This method is an API call. For general usage, consider :attr:`guilds` instead.
        Examples
        ---------
        Usage ::
            async for guild in client.fetch_guilds(limit=150):
                print(guild.name)
        Flattening into a list ::
            guilds = await client.fetch_guilds(limit=150).flatten()
            # guilds is now a list of Guild...
        All parameters are optional.
        Parameters
        -----------
        limit: Optional[:class:`int`]
            The number of guilds to retrieve.
            If ``None``, it retrieves every guild you have access to. Note, however,
            that this would make it a slow operation.
            Defaults to ``100``.
        before: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]
            Retrieves guilds before this date or object.
            If a datetime is provided, it is recommended to use a UTC aware datetime.
            If the datetime is naive, it is assumed to be local time.
        after: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]
            Retrieve guilds after this date or object.
            If a datetime is provided, it is recommended to use a UTC aware datetime.
            If the datetime is naive, it is assumed to be local time.
        Raises
        ------
        :exc:`.HTTPException`
            Getting the guilds failed.
        Yields
        --------
        :class:`.Guild`
            The guild with the guild data parsed.
        """
        # NOTE(review): despite the ``List[Guild]`` return annotation, this
        # returns a lazy GuildIterator (async iterator) used via ``async for``
        # or ``.flatten()`` — confirm before tightening the hint.
        return GuildIterator(self, limit=limit, before=before, after=after)
async def fetch_template(self, code):
"""|coro|
Gets a :class:`.Template` from a discord.new URL or code.
Parameters
-----------
code: Union[:class:`.Template`, :class:`str`]
The Discord Template Code or URL (must be a discord.new URL).
Raises
-------
:exc:`.NotFound`
The template is invalid.
:exc:`.HTTPException`
Getting the template failed.
Returns
--------
:class:`.Template`
The template from the URL/code.
"""
code = utils.resolve_template(code)
data = await self.http.get_template(code)
return Template(data=data, state=self._connection) # type: ignore
async def fetch_guild(self, guild_id):
"""|coro|
Retrieves a :class:`.Guild` from an ID.
.. note::
Using this, you will **not** receive :attr:`.Guild.channels`, :attr:`.Guild.members`,
:attr:`.Member.activity` and :attr:`.Member.voice` per :class:`.Member`.
.. note::
This method is an API call. For general usage, consider :meth:`get_guild` instead.
Parameters
-----------
guild_id: :class:`int`
The guild's ID to fetch from.
Raises
------
:exc:`.Forbidden`
You do not have access to the guild.
:exc:`.HTTPException`
Getting the guild failed.
Returns
--------
:class:`.Guild`
The guild from the ID.
"""
data = await self.http.get_guild(guild_id)
return Guild(data=data, state=self._connection)
    async def create_guild(self, name: str, region: Optional[VoiceRegion] = None, icon: Any = None, *, code: str = None):
        """|coro|
        Creates a :class:`.Guild`.
        Bot accounts in more than 10 guilds are not allowed to create guilds.
        Parameters
        ----------
        name: :class:`str`
            The name of the guild.
        region: :class:`.VoiceRegion`
            The region for the voice communication server.
            Defaults to :attr:`.VoiceRegion.us_west`.
        icon: :class:`bytes`
            The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit`
            for more details on what is expected.
        code: Optional[:class:`str`]
            The code for a template to create the guild with.
            .. versionadded:: 1.4
        Raises
        ------
        :exc:`.HTTPException`
            Guild creation failed.
        :exc:`.InvalidArgument`
            Invalid icon image format given. Must be PNG or JPG.
        Returns
        -------
        :class:`.Guild`
            The guild created. This is not the same guild that is
            added to cache.
        """
        if icon is not None:
            # The API expects the icon as a base64 data URI, not raw bytes.
            icon = utils._bytes_to_base64_data(icon)
        region = region or VoiceRegion.us_west
        region_value = region.value
        # A template code switches to the template-based creation endpoint.
        if code:
            data = await self.http.create_from_template(code, name, region_value, icon)
        else:
            data = await self.http.create_guild(name, region_value, icon)
        # Built from the raw response; the cached guild arrives separately
        # via the gateway.
        return Guild(data=data, state=self._connection)
async def fetch_stage_instance(self, channel_id: int) -> StageInstance:
"""|coro|
Gets a :class:`StageInstance` for a stage channel id.
.. versionadded:: 2.0
Parameters
-----------
channel_id: :class:`int`
The stage channel ID.
Raises
-------
:exc:`.NotFound`
The stage instance or channel could not be found.
:exc:`.HTTPException`
Getting the stage instance failed.
Returns
--------
:class:`StageInstance`
The stage instance from the stage channel ID.
"""
data = await self.http.get_stage_instance(channel_id)
guild = self.get_guild(int(data['guild_id']))
return StageInstance(guild=guild, state=self._connection, data=data) # type: ignore
# Invite management
async def fetch_invite(self, url: Union[Invite, str], *, with_counts: bool = True, with_expiration: bool = True) -> Invite:
"""|coro|
Gets an :class:`.Invite` from a discord.gg URL or ID.
.. note::
If the invite is for a guild you have not joined, the guild and channel
attributes of the returned :class:`.Invite` will be :class:`.PartialInviteGuild` and
:class:`.PartialInviteChannel` respectively.
Parameters
-----------
url: Union[:class:`.Invite`, :class:`str`]
The Discord invite ID or URL (must be a discord.gg URL).
with_counts: :class:`bool`
Whether to include count information in the invite. This fills the
:attr:`.Invite.approximate_member_count` and :attr:`.Invite.approximate_presence_count`
fields.
with_expiration: :class:`bool`
Whether to include the expiration date of the invite. This fills the
:attr:`.Invite.expires_at` field.
.. versionadded:: 2.0
Raises
-------
:exc:`.NotFound`
The invite has expired or is invalid.
:exc:`.HTTPException`
Getting the invite failed.
Returns
--------
:class:`.Invite`
The invite from the URL/ID.
"""
invite_id = utils.resolve_invite(url)
data = await self.http.get_invite(invite_id, with_counts=with_counts, with_expiration=with_expiration)
return Invite.from_incomplete(state=self._connection, data=data)
async def delete_invite(self, invite: Union[Invite, str]) -> None:
"""|coro|
Revokes an :class:`.Invite`, URL, or ID to an invite.
You must have the :attr:`~.Permissions.manage_channels` permission in
the associated guild to do this.
Parameters
----------
invite: Union[:class:`.Invite`, :class:`str`]
The invite to revoke.
Raises
-------
:exc:`.Forbidden`
You do not have permissions to revoke invites.
:exc:`.NotFound`
The invite is invalid or expired.
:exc:`.HTTPException`
Revoking the invite failed.
"""
invite_id = utils.resolve_invite(invite)
await self.http.delete_invite(invite_id)
# Miscellaneous stuff
async def fetch_widget(self, guild_id):
"""|coro|
Gets a :class:`.Widget` from a guild ID.
.. note::
The guild must have the widget enabled to get this information.
Parameters
-----------
guild_id: :class:`int`
The ID of the guild.
Raises
-------
:exc:`.Forbidden`
The widget for this guild is disabled.
:exc:`.HTTPException`
Retrieving the widget failed.
Returns
--------
:class:`.Widget`
The guild's widget.
"""
data = await self.http.get_widget(guild_id)
return Widget(state=self._connection, data=data)
async def application_info(self):
"""|coro|
Retrieves the bot's application information.
Raises
-------
:exc:`.HTTPException`
Retrieving the information failed somehow.
Returns
--------
:class:`.AppInfo`
The bot's application information.
"""
data = await self.http.application_info()
if 'rpc_origins' not in data:
data['rpc_origins'] = None
return AppInfo(self._connection, data)
async def fetch_user(self, user_id):
"""|coro|
Retrieves a :class:`~discord.User` based on their ID.
You do not have to share any guilds with the user to get this information,
however many operations do require that you do.
.. note::
This method is an API call. If you have :attr:`discord.Intents.members` and member cache enabled, consider :meth:`get_user` instead.
Parameters
-----------
user_id: :class:`int`
The user's ID to fetch from.
Raises
-------
:exc:`.NotFound`
A user with this ID does not exist.
:exc:`.HTTPException`
Fetching the user failed.
Returns
--------
:class:`~discord.User`
The user you requested.
"""
data = await self.http.get_user(user_id)
return User(state=self._connection, data=data)
    async def fetch_channel(self, channel_id):
        """|coro|
        Retrieves a :class:`.abc.GuildChannel` or :class:`.abc.PrivateChannel` with the specified ID.
        .. note::
            This method is an API call. For general usage, consider :meth:`get_channel` instead.
        .. versionadded:: 1.2
        Raises
        -------
        :exc:`.InvalidData`
            An unknown channel type was received from Discord.
        :exc:`.HTTPException`
            Retrieving the channel failed.
        :exc:`.NotFound`
            Invalid Channel ID.
        :exc:`.Forbidden`
            You do not have permission to fetch this channel.
        Returns
        --------
        Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`]
            The channel from the ID.
        """
        data = await self.http.get_channel(channel_id)
        # Map the numeric channel type in the payload to a concrete class.
        factory, ch_type = _channel_factory(data['type'])
        if factory is None:
            raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(data))
        if ch_type in (ChannelType.group, ChannelType.private):
            # DM/group channels have no guild; they are built around the
            # current user instead.
            channel = factory(me=self.user, data=data, state=self._connection)
        else:
            # The guild may not be cached; fall back to a bare Object so the
            # channel is still constructible.
            guild_id = int(data['guild_id'])
            guild = self.get_guild(guild_id) or Object(id=guild_id)
            channel = factory(guild=guild, state=self._connection, data=data)
        return channel
async def fetch_webhook(self, webhook_id):
"""|coro|
Retrieves a :class:`.Webhook` with the specified ID.
Raises
--------
:exc:`.HTTPException`
Retrieving the webhook failed.
:exc:`.NotFound`
Invalid webhook ID.
:exc:`.Forbidden`
You do not have permission to fetch this webhook.
Returns
---------
:class:`.Webhook`
The webhook you requested.
"""
data = await self.http.get_webhook(webhook_id)
return Webhook.from_state(data, state=self._connection)
async def create_dm(self, user):
"""|coro|
Creates a :class:`.DMChannel` with this user.
This should be rarely called, as this is done transparently for most
people.
.. versionadded:: 2.0
Parameters
-----------
user: :class:`~discord.abc.Snowflake`
The user to create a DM with.
Returns
-------
:class:`.DMChannel`
The channel that was created.
"""
state = self._connection
found = state._get_private_channel_by_user(user.id)
if found:
return found
data = await state.http.start_private_message(user.id)
return state.add_dm_channel(data)
def add_view(self, view: View, *, message_id: Optional[int] = None) -> None:
"""Registers a :class:`~discord.ui.View` for persistent listening.
This method should be used for when a view is comprised of components
that last longer than the lifecycle of the program.
Parameters
------------
view: :class:`discord.ui.View`
The view to register for dispatching.
message_id: Optional[:class:`int`]
The message ID that the view is attached to. This is currently used to
refresh the view's state during message update events. If not given
then message update events are not propagated for the view.
Raises
-------
TypeError
A view was not passed.
ValueError
The view is not persistent. A persistent view has no timeout
and all their components have an explicitly provided custom_id.
"""
if not isinstance(view, View):
raise TypeError(f'expected an instance of View not {view.__class__!r}')
if not view.is_persistent():
raise ValueError('View is not persistent. Items need to have a custom_id set and View must have no timeout')
self._connection.store_view(view, message_id)
    @property
    def persistent_views(self) -> Sequence[View]:
        """Sequence[:class:`View`]: A sequence of persistent views added to the client."""
        # Delegates to the connection state, which owns view storage.
        return self._connection.persistent_views
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 9 21:31:53 2015\n\nCreate random synthetic velocity profile + linear first guesses\n\n@author: alex\n\"\"\"\n\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef savitzky... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 9 21:31:53 2015\n\nCreate random synthetic velocity profile + linear first guesses\n\n@author: alex\n\"\"\"\n\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D... | ```python
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 21:31:53 2015
Create random synthetic velocity profile + linear first guesses
@author: alex
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
plt.close('all')
random.seed(2)
X = range(256)
Z = np.linspace(0,1000,256)
pos = 50
posVpVs = 1.7
L = np.array([pos])
vpvs = np.array([posVpVs])
for x in X[1:]:
pos += random.choice((-0.9,1)) #random.choice((-1,1))
posVpVs += random.choice((-0.02,0.02))
L=np.append(L,pos)
vpvs = np.append(vpvs,posVpVs)
L=70*L
Vp = savitzky_golay(L, 51, 3) # window size 51, polynomial order 3
A = np.array([ Z, np.ones(256)])
Vs = Vp/savitzky_golay(vpvs, 51, 3) # window size 51, polynomial order 3
w = np.linalg.lstsq(A.T,Vp)[0] # obtaining the parameters
# plotting the line
lineP = w[0]*Z+w[1]+500 # regression line
w = np.linalg.lstsq(A.T,Vs)[0] # obtaining the parameters
# plotting the line
lineS = w[0]*Z+w[1]-250 # regression line
plt.figure()
plt.hold(True)
plt.plot(L,Z,label="Random walk")
plt.plot(Vp,Z,linewidth=4,label="P wave velocity from this random walk")
plt.plot(lineP,Z,linewidth=4,label="First guess")
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
plt.legend()
plt.figure()
plt.hold(True)
plt.plot(vpvs,Z,linewidth=4,label="Random walk vp/vs")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
plt.figure()
plt.hold(True)
plt.plot(Vs,Z,linewidth=4,label="S wave velocity from random vp/vs")
plt.plot(lineS,Z,linewidth=4,label="First guess")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
# Save profiles
np.savetxt("dataExample/realProfileP.txt",np.dstack((Z,Vp))[0])
np.savetxt("dataExample/realProfileS.txt",np.dstack((Z,Vs))[0])
np.savetxt("dataExample/firstGuessP.txt",np.dstack((Z,lineP))[0])
np.savetxt("dataExample/firstGuessS.txt",np.dstack((Z,lineS))[0])
#####################################################################
coordShotsX=[300,500]
coordShotsY=[400]
coordShotsZ=[650]
coordStatsX=[200,300,400,500,600]
coordStatsY=[200,300,400,500,600]
coordStatsZ=[200,300,400,500,600]
Xshots=[]
Yshots=[]
Zshots=[]
Xstats=[]
Ystats=[]
Zstats=[]
#Open a file in write mode:
fo = open("dataExample/coordShots.txt", "w+")
for coordX in coordShotsX:
for coordY in coordShotsY:
for coordZ in coordShotsZ:
Xshots.append(coordX)
Yshots.append(coordY)
Zshots.append(coordZ)
fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
#Open a file in write mode:
fo = open("dataExample/coordStats.txt", "w+")
for coordX in coordStatsX:
for coordY in coordStatsY:
for coordZ in coordStatsZ:
Xstats.append(coordX)
Ystats.append(coordY)
Zstats.append(coordZ)
fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
fig = plt.figure()
ax = fig.gca(projection='3d') #Axes3D(fig)
ax.hold(True)
ax.scatter(Xstats,Ystats,Zstats,zdir='z',s=20,c='b')
if (len(coordShotsX) > 3):
ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=20,c='r',marker='^')
else:
ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=200,c='r',marker='^')
ax.set_xlim3d(min(min(Xshots),min(Xstats))-100,max(max(Xshots),max(Xstats))+100)
ax.set_ylim3d(min(min(Yshots),min(Ystats))-100,max(max(Yshots),max(Ystats))+100)
ax.set_zlim3d(min(min(Zshots),min(Zstats))-100,max(max(Zshots),max(Zstats))+100)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_zlabel('Z (m)')
ax.set_title('Geometry')
ax.invert_zaxis()
``` |
[
{
"content": "Here is the source code:\n```python\nfrom ovito import *\nfrom ovito.io import *\nfrom ovito.modifiers import *\nimport numpy as np\n\nnode = import_file(\"../../files/NetCDF/sheared_aSi.nc\")\n\nmodifier = WignerSeitzAnalysisModifier()\nnode.modifiers.append(modifier)\nmodifier.reference.load(\".... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\nfrom ovito import *\nfrom ovito.io import *\nfrom ovito.modifiers import *\nimport numpy as np\n\nnode = import_file(\"../../files/NetCDF/sheared_aSi.nc\")\n\nmodifier = WignerSeitzAnalysisModifier()\nnode.modifiers.append(modifier)\nmodifier.re... | ```python
from ovito import *
from ovito.io import *
from ovito.modifiers import *
import numpy as np
node = import_file("../../files/NetCDF/sheared_aSi.nc")
modifier = WignerSeitzAnalysisModifier()
node.modifiers.append(modifier)
modifier.reference.load("../../files/NetCDF/sheared_aSi.nc")
dataset.anim.current_frame = 4
print("Parameter defaults:")
print(" eliminate_cell_deformation: {}".format(modifier.eliminate_cell_deformation))
modifier.eliminate_cell_deformation = True
print(" frame_offset: {}".format(modifier.frame_offset))
modifier.frame_offset = 0
print(" reference_frame: {}".format(modifier.reference_frame))
modifier.reference_frame = 0
print(" use_frame_offset: {}".format(modifier.use_frame_offset))
modifier.use_frame_offset = False
node.compute()
print("Output:")
print(" vacancy_count= {}".format(modifier.vacancy_count))
print(" interstitial_count= {}".format(modifier.interstitial_count))
print(" vacancy_count= {}".format(node.output.attributes['WignerSeitz.vacancy_count']))
print(" interstitial_count= {}".format(node.output.attributes['WignerSeitz.interstitial_count']))
print(node.output["Occupancy"].array)
assert(node.output.attributes['WignerSeitz.vacancy_count'] == 970)
assert(modifier.vacancy_count == 970)
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\nfrom infoporto.odoo.core.odoo import OdooInstance\n\n\nclass Odoo(object):\n\n # settings\n def getCurrency(self):\n \"\"\" Retrieve currency from Odoo Company settings \"\"\"\n odoo_core = OdooInstance... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\nfrom infoporto.odoo.core.odoo import OdooInstance\n\n\nclass Odoo(object):\n\n # settings\n def getCurrency(self):\n \"\"\" Retrieve currency from Odoo Company settings \"\"\"\n odoo_cor... | ```python
from infoporto.odoo.core.odoo import OdooInstance
class Odoo(object):
# settings
def getCurrency(self):
""" Retrieve currency from Odoo Company settings """
odoo_core = OdooInstance()
# company ID should be dynamic
return odoo_core.read('res.company', 1, ['currency_id'])
# product.category
def getAncestors(self, cid):
""" Retrieve recursively all parents for the given cid """
odoo_core = OdooInstance()
res = []
last_found = cid
while last_found:
category = odoo_core.read('product.category', int(last_found), ['id', 'name', 'parent_id'])
if category['parent_id']:
last_found = category['parent_id'][0]
else:
last_found = False
res.append(dict(id=category['id'], name=category['name']))
return reversed(res)
def getCategory(self, cid):
odoo_core = OdooInstance()
category = odoo_core.read('product.category', [int(cid)], ['id', 'name'])
return category[0]
def getCategories(self, cid=False):
odoo_core = OdooInstance()
if not cid:
args = [('parent_id', '=', False)]
else:
args = [('parent_id', '=', int(cid))]
ids = odoo_core.search('product.category', args)
categories = odoo_core.read('product.category', ids, ['id', 'name'])
return categories
def getProducts(self, cid=False):
odoo_core = OdooInstance()
if not cid:
args = []
else:
args = [('categ_id', '=', int(cid))]
ids = odoo_core.search('product.product', args)
products = odoo_core.read('product.product', ids,
['id', 'name', 'description',
'lst_price', 'image', 'image_medium',
'categ_id', 'taxes_id'])
for product in products:
if product['taxes_id']:
tax = odoo_core.read('account.tax',
int(product['taxes_id'][0]), ['amount'])['amount']
else:
tax = 0.0
product['tax'] = tax
product = self.sanitizeProduct(product)
return products
# product.product
def getProduct(self, pid):
odoo_core = OdooInstance()
product = odoo_core.read('product.product', int(pid),
['id', 'name', 'description',
'lst_price', 'image', 'image_medium',
'categ_id', 'taxes_id'])
if product['taxes_id']:
tax = odoo_core.read('account.tax',
int(product['taxes_id'][0]), ['amount'])['amount']
else:
tax = 0.0
product['tax'] = tax
return self.sanitizeProduct(product)
def getInShowcase(self):
#odoo_core = OdooInstance()
#TODO: an attribute shoudl be added to Odoo product management
return self.getProducts()
def sanitizeProduct(self, p):
""" Sanitize product for using in templates """
from money import Money
p['price'] = p['lst_price']
p['lst_price'] = Money(amount=p['lst_price'],
currency=self.getCurrency().get('currency_id')[1])
p['price_total'] = Money(amount=p['price'] * (1 + p['tax']),
currency=self.getCurrency().get('currency_id')[1])
p['categ_id'] = p['categ_id'][0]
# Category norm
if p['image']:
p['image'] = ''.join(["data:image/png;base64,", p['image']])
if p['image_medium']:
p['image_medium'] = ''.join(["data:image/png;base64,", p['image_medium']])
return p
def createSalesOrder(self, params, cart):
""" Create a partner if the e-mail weren't found, create a Sales Order
and its Sales Order Line """
odoo_core = OdooInstance()
# check if user exists ...
args = [('email', '=', params['user']['email'])]
ids = odoo_core.search('res.partner', args)
# ... otherwise create it
if not ids:
partner_id = odoo_core.create('res.partner',
dict(name=params['user']['name'],
email=params['user']['email']))
# build sales order
so = dict(partner_id=ids[0] or partner_id,
state="manual",
amount_total=params['total'] * 1.22,
amount_tax=params['total'] * 1.22 - params['total'],
amount_untaxed=params['total'])
so_id = odoo_core.create('sale.order', so)
for el in cart:
sol = dict(order_id=so_id,
product_uom=1,
price_unit=float(el['price_total']),
product_uom_qty=1,
state='confirmed',
product_id=el['id'],
order_partner_id=ids[0],
tax_id=[1])
sol_id = odoo_core.create('sale.order.line', sol)
#FIXME: taxes?!?
return so_id
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n#\n# PyQt documentation build configuration file, created by\n# sphinx-quickstart on Sat May 30 14:28:55 2009.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present ... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# PyQt documentation build configuration file, created by\n# sphinx-quickstart on Sat May 30 14:28:55 2009.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration val... | ```python
# -*- coding: utf-8 -*-
#
# PyQt documentation build configuration file, created by
# sphinx-quickstart on Sat May 30 14:28:55 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyQt'
copyright = u'2015 Riverbank Computing Limited'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.11.4'
# The full version, including alpha/beta/rc tags.
release = '4.11.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
#exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "PyQt 4.11.4 Reference Guide"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'static/logo_tn.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyQtdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyQt.tex', u'PyQt Documentation',
u'Riverbank Computing Limited', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
def setup(app):
""" Define roles specific to PyQt. """
pass
``` |
[
{
"content": "Write the code verbatim:\n```python\n# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ht... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License... | ```python
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MovieLens dataset."""
import os
import textwrap
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.structured import movielens_parsing
_CITATION = """
@article{10.1145/2827872,
author = {Harper, F. Maxwell and Konstan, Joseph A.},
title = {The MovieLens Datasets: History and Context},
year = {2015},
issue_date = {January 2016},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {5},
number = {4},
issn = {2160-6455},
url = {https://doi.org/10.1145/2827872},
doi = {10.1145/2827872},
journal = {ACM Trans. Interact. Intell. Syst.},
month = dec,
articleno = {19},
numpages = {19},
keywords = {Datasets, recommendations, ratings, MovieLens}
}
"""
_DESCRIPTION = """
This dataset contains a set of movie ratings from the MovieLens website, a movie
recommendation service. This dataset was collected and maintained by [GroupLens]
(https://grouplens.org/), a research group at the University of Minnesota. There
are 5 versions included: "25m", "latest-small", "100k", "1m", "20m". In all
datasets, the movies data and ratings data are joined on "movieId". The 25m
dataset, latest-small dataset, and 20m dataset contain only movie data and
rating data. The 1m dataset and 100k dataset contain demographic data in
addition to movie and rating data.
- "25m": This is the latest stable version of the MovieLens dataset. It is
recommended for research purposes.
- "latest-small": This is a small subset of the latest version of the MovieLens
dataset. It is changed and updated over time by GroupLens.
- "100k": This is the oldest version of the MovieLens datasets. It is a small
dataset with demographic data.
- "1m": This is the largest MovieLens dataset that contains demographic data.
- "20m": This is one of the most used MovieLens datasets in academic papers
along with the 1m dataset.
For each version, users can view either only the movies data by adding the
"-movies" suffix (e.g. "25m-movies") or the ratings data joined with the movies
data (and users data in the 1m and 100k datasets) by adding the "-ratings"
suffix (e.g. "25m-ratings").
The features below are included in all versions with the "-ratings" suffix.
- "movie_id": a unique identifier of the rated movie
- "movie_title": the title of the rated movie with the release year in
parentheses
- "movie_genres": a sequence of genres to which the rated movie belongs
- "user_id": a unique identifier of the user who made the rating
- "user_rating": the score of the rating on a five-star scale
- "timestamp": the timestamp of the ratings, represented in seconds since
midnight Coordinated Universal Time (UTC) of January 1, 1970
The "100k-ratings" and "1m-ratings" versions in addition include the following
demographic features.
- "user_gender": gender of the user who made the rating; a true value
corresponds to male
- "bucketized_user_age": bucketized age values of the user who made the rating,
the values and the corresponding ranges are:
- 1: "Under 18"
- 18: "18-24"
- 25: "25-34"
- 35: "35-44"
- 45: "45-49"
- 50: "50-55"
- 56: "56+"
- "user_occupation_label": the occupation of the user who made the rating
represented by an integer-encoded label; labels are preprocessed to be
consistent across different versions
- "user_occupation_text": the occupation of the user who made the rating in
the original string; different versions can have different set of raw text
labels
- "user_zip_code": the zip code of the user who made the rating
In addition, the "100k-ratings" dataset would also have a feature "raw_user_age"
which is the exact ages of the users who made the rating
Datasets with the "-movies" suffix contain only "movie_id", "movie_title", and
"movie_genres" features.
"""
_FORMAT_VERSIONS = ['25m', 'latest-small', '20m', '100k', '1m']
_TABLE_OPTIONS = ['movies', 'ratings']
class MovieLensConfig(tfds.core.BuilderConfig):
"""BuilderConfig for MovieLens dataset."""
def __init__(self,
format_version: Optional[str] = None,
table_option: Optional[str] = None,
download_url: Optional[str] = None,
parsing_fn: Optional[Callable[[str], Iterator[Tuple[int, Dict[
str, Any]]],]] = None,
**kwargs) -> None:
"""Constructs a MovieLensConfig.
Args:
format_version: a string to identify the format of the dataset, one of
'_FORMAT_VERSIONS'.
table_option: a string to identify the table to expose, one of
'_TABLE_OPTIONS'.
download_url: a string url for downloading the dataset.
parsing_fn: a callable for parsing the data.
**kwargs: keyword arguments forwarded to super.
Raises:
ValueError: if format_version is not one of '_FORMAT_VERSIONS' or if
table_option is not one of '_TABLE_OPTIONS'.
"""
if format_version not in _FORMAT_VERSIONS:
raise ValueError('format_version must be one of %s.' % _FORMAT_VERSIONS)
if table_option not in _TABLE_OPTIONS:
raise ValueError('table_option must be one of %s.' % _TABLE_OPTIONS)
super(MovieLensConfig, self).__init__(**kwargs)
self._format_version = format_version
self._table_option = table_option
self._download_url = download_url
self._parsing_fn = parsing_fn
@property
def format_version(self) -> str:
return self._format_version
@property
def table_option(self) -> str:
return self._table_option
@property
def download_url(self) -> str:
return self._download_url
@property
def parsing_fn(
self) -> Optional[Callable[[str], Iterator[Tuple[int, Dict[str, Any]]],]]:
return self._parsing_fn
class Movielens(tfds.core.GeneratorBasedBuilder):
"""MovieLens rating dataset."""
BUILDER_CONFIGS = [
MovieLensConfig(
name='25m-ratings',
description=textwrap.dedent("""\
This dataset contains 25,000,095 ratings across 62,423 movies,
created by 162,541 users between January 09, 1995 and November 21,
2019. This dataset is the latest stable version of the MovieLens
dataset, generated on November 21, 2019.
Each user has rated at least 20 movies. The ratings are in
half-star increments. This dataset does not include demographic
data."""),
version='0.1.0',
format_version='25m',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-25m.zip'),
parsing_fn=movielens_parsing.parse_current_ratings_data,
),
MovieLensConfig(
name='25m-movies',
description=textwrap.dedent("""\
This dataset contains data of 62,423 movies rated in the 25m
dataset."""),
version='0.1.0',
format_version='25m',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-25m.zip'),
parsing_fn=movielens_parsing.parse_current_movies_data,
),
# The latest-small dataset is changed over time. Its checksum might need
# updating in the future.
MovieLensConfig(
name='latest-small-ratings',
description=textwrap.dedent("""\
This dataset contains 100,836 ratings across 9,742 movies, created
by 610 users between March 29, 1996 and September 24, 2018. This
dataset is generated on September 26, 2018 and is the a subset of
the full latest version of the MovieLens dataset. This dataset
is changed and updated over time.
Each user has rated at least 20 movies. The ratings are in
half-star increments. This dataset does not include demographic
data."""),
version='0.1.0',
format_version='latest-small',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-latest-small.zip'),
parsing_fn=movielens_parsing.parse_current_ratings_data,
),
MovieLensConfig(
name='latest-small-movies',
description=textwrap.dedent("""\
This dataset contains data of 9,742 movies rated in the
latest-small dataset."""),
version='0.1.0',
format_version='latest-small',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-latest-small.zip'),
parsing_fn=movielens_parsing.parse_current_movies_data,
),
MovieLensConfig(
name='100k-ratings',
description=textwrap.dedent("""\
This dataset contains 100,000 ratings from 943 users on 1,682
movies. This dataset is the oldest version of the MovieLens
dataset.
Each user has rated at least 20 movies. Ratings are in whole-star
increments. This dataset contains demographic data of users in
addition to data on movies and ratings."""),
version='0.1.0',
format_version='100k',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-100k.zip'),
parsing_fn=movielens_parsing.parse_100k_ratings_data,
),
MovieLensConfig(
name='100k-movies',
description=textwrap.dedent("""\
This dataset contains data of 1,682 movies rated in the 100k
dataset."""),
version='0.1.0',
format_version='100k',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-100k.zip'),
parsing_fn=movielens_parsing.parse_100k_movies_data,
),
MovieLensConfig(
name='1m-ratings',
description=textwrap.dedent("""\
This dataset contains 1,000,209 anonymous ratings of approximately
3,900 movies made by 6,040 MovieLens users who joined MovieLens in
2000. This dataset is the largest dataset that includes
demographic data.
Each user has rated at least 20 movies. Ratings are in whole-star
increments. In demographic data, age values are divided into
ranges and the lowest age value for each range is used in the data
instead of the actual values."""),
version='0.1.0',
format_version='1m',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-1m.zip'),
parsing_fn=movielens_parsing.parse_1m_ratings_data,
),
MovieLensConfig(
name='1m-movies',
description=textwrap.dedent("""\
This dataset contains data of approximately 3,900 movies rated in
the 1m dataset."""),
version='0.1.0',
format_version='1m',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-1m.zip'),
parsing_fn=movielens_parsing.parse_1m_movies_data,
),
MovieLensConfig(
name='20m-ratings',
description=textwrap.dedent("""\
This dataset contains 20,000,263 ratings across 27,278
movies, created by 138,493 users between January 09, 1995 and
March 31, 2015. This dataset was generated on October 17, 2016.
Each user has rated at least 20 movies. Ratings are in half-star
increments. This dataset does not contain demographic data."""),
version='0.1.0',
format_version='20m',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-20m.zip'),
parsing_fn=movielens_parsing.parse_current_ratings_data,
),
MovieLensConfig(
name='20m-movies',
description=textwrap.dedent("""\
This dataset contains data of 27,278 movies rated in the 20m
dataset"""),
version='0.1.0',
format_version='20m',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-20m.zip'),
parsing_fn=movielens_parsing.parse_current_movies_data,
),
]
VERSION = tfds.core.Version('0.1.0')
def _info(self) -> tfds.core.DatasetInfo:
"""Returns DatasetInfo according to self.builder_config."""
movie_features_dict = {
'movie_id':
tf.string,
'movie_title':
tf.string,
'movie_genres':
tfds.features.Sequence(
tfds.features.ClassLabel(names=[
'Action',
'Adventure',
'Animation',
'Children',
'Comedy',
'Crime',
'Documentary',
'Drama',
'Fantasy',
'Film-Noir',
'Horror',
'IMAX',
'Musical',
'Mystery',
'Romance',
'Sci-Fi',
'Thriller',
'Unknown',
'War',
'Western',
'(no genres listed)',
]),),
}
rating_features_dict = {
'user_id': tf.string,
'user_rating': tf.float32,
# Using int64 since tfds currently does not support float64.
'timestamp': tf.int64,
}
demographic_features_dict = {
'user_gender':
tf.bool,
'bucketized_user_age':
tf.float32,
'user_occupation_label':
tfds.features.ClassLabel(names=[
'academic/educator',
'artist',
'clerical/admin',
'customer service',
'doctor/health care',
'entertainment',
'executive/managerial',
'farmer',
'homemaker',
'lawyer',
'librarian',
'other/not specified',
'programmer',
'retired',
'sales/marketing',
'scientist',
'self-employed',
'student',
'technician/engineer',
'tradesman/craftsman',
'unemployed',
'writer',
]),
'user_occupation_text':
tf.string,
'user_zip_code':
tf.string,
}
features_dict = {}
if self.builder_config.table_option == 'movies':
features_dict.update(movie_features_dict)
# For the other cases, self.builder_config.table_option == 'ratings'.
# Older versions of MovieLens (1m, 100k) have demographic features.
elif self.builder_config.format_version == '1m':
features_dict.update(movie_features_dict)
features_dict.update(rating_features_dict)
features_dict.update(demographic_features_dict)
elif self.builder_config.format_version == '100k':
# Only the 100k dataset contains exact user ages. The 1m dataset
# contains only bucketized age values.
features_dict.update(movie_features_dict)
features_dict.update(rating_features_dict)
features_dict.update(demographic_features_dict)
features_dict.update(raw_user_age=tf.float32)
else:
features_dict.update(movie_features_dict)
features_dict.update(rating_features_dict)
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(features_dict),
supervised_keys=None,
homepage='https://grouplens.org/datasets/movielens/',
citation=_CITATION,
)
def _split_generators(
self, dl_manager: tfds.download.DownloadManager
) -> List[tfds.core.SplitGenerator]:
"""Returns SplitGenerators."""
extracted_path = dl_manager.download_and_extract(
self.builder_config.download_url,)
dir_path = os.path.join(
extracted_path,
'ml-%s' % self.builder_config.format_version,
)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={'dir_path': dir_path},
),
]
def _generate_examples(
self,
dir_path: Optional[str] = None) -> Iterator[Tuple[int, Dict[str, Any]]]:
"""Yields examples by calling the corresponding parsing function."""
for ex in self.builder_config.parsing_fn(dir_path):
yield ex
class MovieLens(Movielens):
"""MovieLens rating dataset (deprecated handle version)."""
def __init__(self, **kwargs):
logging.warning(
'The handle "movie_lens" for the MovieLens dataset is deprecated. '
'Prefer using "movielens" instead.')
super(MovieLens, self).__init__(**kwargs)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\nfrom future.builtins import str, object\nfrom .context import log, queue_job\nimport datetime\nimport ujson as json\nimport time\n\n\ndef _hash_task(task):\n \"\"\" Returns a unique hash for identify a task and its ... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\nfrom future.builtins import str, object\nfrom .context import log, queue_job\nimport datetime\nimport ujson as json\nimport time\n\n\ndef _hash_task(task):\n \"\"\" Returns a unique hash for identify... | ```python
from future.builtins import str, object
from .context import log, queue_job
import datetime
import ujson as json
import time
def _hash_task(task):
""" Returns a unique hash for identify a task and its params """
params = task.get("params")
if params:
params = json.dumps(sorted(list(task["params"].items()), key=lambda x: x[0])) # pylint: disable=no-member
full = [str(task.get(x)) for x in ["path", "interval", "dailytime", "weekday", "monthday", "queue"]]
full.extend([str(params)])
return " ".join(full)
class Scheduler(object):
def __init__(self, collection, config_tasks):
self.collection = collection
self.config_tasks = config_tasks
self.config_synced = False
self.all_tasks = []
def check_config_integrity(self):
""" Make sure the scheduler config is valid """
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}
if len(tasks_by_hash) != len(self.config_tasks):
raise Exception("Fatal error: there was a hash duplicate in the scheduled tasks config.")
for h, task in tasks_by_hash.items():
if task.get("monthday") and not task.get("dailytime"):
raise Exception("Fatal error: you can't schedule a task with 'monthday' and without 'dailytime' (%s)" % h)
if task.get("weekday") and not task.get("dailytime"):
raise Exception("Fatal error: you can't schedule a task with 'weekday' and without 'dailytime' (%s)" % h)
if not task.get("monthday") and not task.get("weekday") and not task.get("dailytime") and not task.get("interval"):
raise Exception("Fatal error: scheduler must be specified one of monthday,weekday,dailytime,interval. (%s)" % h)
def sync_config_tasks(self):
""" Performs the first sync of a list of tasks, often defined in the config file. """
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}
for task in self.all_tasks:
if tasks_by_hash.get(task["hash"]):
del tasks_by_hash[task["hash"]]
else:
self.collection.remove({"_id": task["_id"]})
log.debug("Scheduler: deleted %s" % task["hash"])
# What remains are the new ones to be inserted
for h, task in tasks_by_hash.items():
task["hash"] = h
task["datelastqueued"] = datetime.datetime.fromtimestamp(0)
if task.get("dailytime"):
# Because MongoDB can store datetimes but not times,
# we add today's date to the dailytime.
# The date part will be discarded in check()
task["dailytime"] = datetime.datetime.combine(
datetime.datetime.utcnow(), task["dailytime"])
task["interval"] = 3600 * 24
# Avoid to queue task in check() if today dailytime is already passed
if datetime.datetime.utcnow().time() > task["dailytime"].time():
task["datelastqueued"] = datetime.datetime.utcnow()
self.collection.find_one_and_update({"hash": task["hash"]}, {"$set": task}, upsert=True)
log.debug("Scheduler: added %s" % task["hash"])
def check(self):
    """Queue every scheduled task whose next run time has been reached."""
    self.all_tasks = list(self.collection.find())

    if not self.config_synced:
        # First pass only: reconcile stored tasks with the config, then
        # re-read so all_tasks reflects the sync result.
        self.sync_config_tasks()
        self.all_tasks = list(self.collection.find())
        self.config_synced = True

    # log.debug(
    #     "Scheduler checking for out-of-date scheduled tasks (%s scheduled)..." %
    #     len(self.all_tasks)
    # )

    now = datetime.datetime.utcnow()
    current_weekday = now.weekday()
    current_monthday = now.day

    for task in self.all_tasks:
        interval = datetime.timedelta(seconds=task["interval"])

        # Never queue a task whose last queue time is in the future
        # (clock skew guard).
        if task["datelastqueued"] >= now:
            continue

        # Missing monthday/weekday keys default to "today", i.e. no filter.
        if task.get("monthday", current_monthday) != current_monthday:
            continue

        if task.get("weekday", current_weekday) != current_weekday:
            continue

        if task.get("dailytime"):
            # Only the time-of-day part of dailytime matters (the date part
            # was added solely for Mongo storage). Skip if already queued
            # today or if the daily slot has not been reached yet.
            if task["datelastqueued"].date() == now.date() or now.time() < task["dailytime"].time():
                continue

        # if we only have "interval" key
        if all(k not in task for k in ["monthday", "weekday", "dailytime"]):
            if now - task["datelastqueued"] < interval:
                continue

        queue_job(
            task["path"],
            task.get("params") or {},
            queue=task.get("queue")
        )

        self.collection.update({"_id": task["_id"]}, {"$set": {
            "datelastqueued": now
        }})

        log.debug("Scheduler: queued %s" % _hash_task(task))

    # Make sure we never again execute a scheduler with the same exact second.
    time.sleep(1)
``` |
[
{
"content": "```python\n#-------------------------------------------------------------------------------\n# Copyright (c) 2011 Anton Golubkov.\n# All rights reserved. This program and the accompanying materials\n# are made available under the terms of the GNU Lesser Public License v2.1\n# which accompanies thi... | [
{
"content": "<|memory_start|>```python\n#-------------------------------------------------------------------------------\n# Copyright (c) 2011 Anton Golubkov.\n# All rights reserved. This program and the accompanying materials\n# are made available under the terms of the GNU Lesser Public License v2.1\n# which... | ```python
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import ipf.ipfblock.processing
import ipfblock
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
import ioport
from ipf.ipftype.ipfmatchtemplatemethodtype import IPFMatchTemplateMethodType
from property import Property
class MatchTemplate(ipfblock.IPFBlock):

    """ Calculate template correlation map
    """

    type = "MatchTemplate"
    category = "Feature detection"
    is_abstract_block = False

    def __init__(self):
        super(MatchTemplate, self).__init__()
        # Image to search in and the template to look for (both 3-channel).
        self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
        self.input_ports["input_template"] = ioport.IPort(self, IPFImage3cType)
        # Single-channel correlation map output.
        self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
        # Matching method exposed as a configurable block property.
        self.properties["method"] = Property(IPFMatchTemplateMethodType)
        self.processing_function = ipf.ipfblock.processing.match_template

    def get_preview_image(self):
        # Preview is the raw value currently held by the output port.
        return self.output_ports["output_image"]._value
``` |
[
{
"content": "```python\n# -*- coding=utf8 -*-\n#******************************************************************************\n# addon.py\n#------------------------------------------------------------------------------\n#\n# Copyright (c) 2013 LivingOn <LivingOn@xmail.net>\n#\n# This program is free software;... | [
{
"content": "<|memory_start|>```python\n# -*- coding=utf8 -*-\n#******************************************************************************\n# addon.py\n#------------------------------------------------------------------------------\n#\n# Copyright (c) 2013 LivingOn <LivingOn@xmail.net>\n#\n# This program i... | ```python
# -*- coding=utf8 -*-
#******************************************************************************
# addon.py
#------------------------------------------------------------------------------
#
# Copyright (c) 2013 LivingOn <LivingOn@xmail.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#******************************************************************************
import sys
import urllib
import xbmcgui
import xbmcaddon
import xbmcplugin
from resources.lib.NachtschichtBhvParser import NachtschichtBhvParser
class NachtschichtBhv(object):
    """XBMC image-plugin entry point for the Nachtschicht-Bhv galleries."""

    PLUGIN_NAME = "plugin.image.nachtschicht-bhv"

    _plugin_id = None
    _addon = None

    def __init__(self):
        self._register_addon()
        self._process_request()

    def _register_addon(self):
        # argv[1] carries the numeric plugin handle assigned by XBMC.
        self._plugin_id = int(sys.argv[1])
        self._addon = xbmcaddon.Addon(id = self.PLUGIN_NAME)

    def _process_request(self):
        # An empty query string means the top-level gallery listing was
        # requested; otherwise show the pictures of one gallery.
        if sys.argv[2]:
            self._create_picture_list(sys.argv[2])
        else:
            self._create_gallery_list()

    def _create_gallery_list(self):
        entries = []
        for (gallery_url, title, thumb) in NachtschichtBhvParser.get_gallery_list():
            plugin_url = sys.argv[0] + "?" + urllib.urlencode({'url' : gallery_url})
            entries.append((plugin_url, xbmcgui.ListItem(title, iconImage=thumb), True,))
        xbmcplugin.addDirectoryItems(self._plugin_id, entries)
        xbmcplugin.endOfDirectory(self._plugin_id, cacheToDisc=True)

    def _create_picture_list(self, url):
        # Strip the leading "?url=" prefix and undo the URL quoting.
        page_url = urllib.unquote(url[5:])
        entries = []
        for index, (picture_url, thumb) in enumerate(
                NachtschichtBhvParser.get_picture_list(page_url), 1):
            entries.append((picture_url, xbmcgui.ListItem("-%d-" % index, iconImage=thumb), False,))
        xbmcplugin.addDirectoryItems(self._plugin_id, entries)
        xbmcplugin.endOfDirectory(self._plugin_id, cacheToDisc=False)
# Instantiate the plugin when invoked directly by XBMC.
if __name__ == "__main__":
    NachtschichtBhv()
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n# Licensed under the Apache License,... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n# Licensed under the... | ```python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
from oslo.config import cfg
import webob.exc
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import safe_utils
LOG = logging.getLogger(__name__)

# Controls whether a broken exception format string is fatal (re-raised)
# or merely logged; see NovaException.__init__.
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='make exception message format errors fatal'),
]

CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
    """WSGI HTTP exception whose code, title and explanation are supplied
    explicitly, for converting arbitrary errors into HTTP responses.
    """
    def __init__(self, code=0, title="", explanation=""):
        # Set the response attributes before the base class initializes.
        self.code = code
        self.title = title
        self.explanation = explanation
        super(ConvertedException, self).__init__()
class ProcessExecutionError(IOError):
    """IOError raised when an external command fails.

    Captures the command, exit code and both output streams so the whole
    failure can be reported in a single message.
    """
    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description

        if description is None:
            description = _('Unexpected error while running command.')
        if exit_code is None:
            exit_code = '-'
        # NOTE: `% locals()` interpolates description/cmd/exit_code/stdout/
        # stderr from the local scope above — keep those names in sync.
        message = _('%(description)s\nCommand: %(cmd)s\n'
                    'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % locals()
        IOError.__init__(self, message)
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
return dict((k, v) for k, v in original.iteritems() if not "_pass" in k)
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                   level=None):
    """This decorator wraps a method to catch any exceptions that may
    get thrown. It logs the exception as well as optionally sending
    it to the notification system.
    """
    # TODO(sandy): Find a way to import nova.notifier.api so we don't have
    # to pass it in as a parameter. Otherwise we get a cyclic import of
    # nova.notifier.api -> nova.utils -> nova.exception :(
    def inner(f):
        def wrapped(self, context, *args, **kw):
            # Don't store self or context in the payload, it now seems to
            # contain confidential information.
            try:
                return f(self, context, *args, **kw)
            except Exception, e:
                # Always re-raised once the with-block finishes.
                with excutils.save_and_reraise_exception():
                    if notifier:
                        payload = dict(exception=e)
                        call_dict = safe_utils.getcallargs(f, *args, **kw)
                        # Strip *_pass keys so credentials never reach the
                        # notification bus.
                        cleansed = _cleanse_dict(call_dict)
                        payload.update({'args': cleansed})

                        # Use a temp vars so we don't shadow
                        # our outer definitions.
                        temp_level = level
                        if not temp_level:
                            temp_level = notifier.ERROR

                        temp_type = event_type
                        if not temp_type:
                            # If f has multiple decorators, they must use
                            # functools.wraps to ensure the name is
                            # propagated.
                            temp_type = f.__name__

                        notifier.notify(context, publisher_id, temp_type,
                                        temp_level, payload)

        return functools.wraps(f)(wrapped)
    return inner
class NovaException(Exception):
    """Base Nova Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    # Default HTTP status code / headers used when the exception is
    # converted to a web response; subclasses override as needed.
    code = 500
    headers = {}
    # safe=True marks messages that may be exposed to end users.
    safe = False

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass

        if not message:
            try:
                message = self.message % kwargs

            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))

                if CONF.fatal_exception_format_errors:
                    raise e
                else:
                    # at least get the core message out if something happened
                    message = self.message

        super(NovaException, self).__init__(message)

    def format_message(self):
        # Remote exception classes get '_Remote' appended to their name;
        # their first arg already carries the formatted message.
        if self.__class__.__name__.endswith('_Remote'):
            return self.args[0]
        else:
            return unicode(self)
class EC2APIError(NovaException):
    """Generic error returned through the EC2 API.

    NOTE(review): assigning self.code here shadows the class-level
    ``code`` HTTP status attribute — presumably intentional; confirm.
    """
    message = _("Unknown")

    def __init__(self, message=None, code=None):
        self.msg = message
        self.code = code
        outstr = '%s' % message
        super(EC2APIError, self).__init__(outstr)
# --- Crypto and virtual-interface failures ---------------------------------

class EncryptionFailure(NovaException):
    message = _("Failed to encrypt text: %(reason)s")


class DecryptionFailure(NovaException):
    message = _("Failed to decrypt text: %(reason)s")


class VirtualInterfaceCreateException(NovaException):
    message = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
    """Raised when no unique MAC address could be found for a new VIF."""
    # Bug fix: the adjacent string literals concatenated without a space,
    # producing "...virtual interfacewith unique mac address failed".
    message = _("5 attempts to create virtual interface "
                "with unique mac address failed")
# --- Authorization and generic "Invalid" request errors ---------------------

class GlanceConnectionFailed(NovaException):
    message = _("Connection to glance host %(host)s:%(port)s failed: "
                "%(reason)s")


class NotAuthorized(NovaException):
    message = _("Not authorized.")
    code = 403


class AdminRequired(NotAuthorized):
    message = _("User does not have admin privileges")


class PolicyNotAuthorized(NotAuthorized):
    message = _("Policy doesn't allow %(action)s to be performed.")


class ImageNotActive(NovaException):
    message = _("Image %(image_id)s is not active.")


class ImageNotAuthorized(NovaException):
    message = _("Not authorized for image %(image_id)s.")


class Invalid(NovaException):
    # Root of the 400-family "bad request" exceptions below.
    message = _("Unacceptable parameters.")
    code = 400


class InvalidBDM(Invalid):
    message = _("Block Device Mapping is Invalid.")


class InvalidBDMSnapshot(InvalidBDM):
    message = _("Block Device Mapping is Invalid: "
                "failed to get snapshot %(id)s.")


class InvalidBDMVolume(InvalidBDM):
    message = _("Block Device Mapping is Invalid: "
                "failed to get volume %(id)s.")


class VolumeUnattached(Invalid):
    message = _("Volume %(volume_id)s is not attached to anything")


class VolumeNotCreated(NovaException):
    message = _("Volume %(volume_id)s did not finish being created"
                " even after we waited %(seconds)s seconds or %(attempts)s"
                " attempts.")


class InvalidKeypair(Invalid):
    message = _("Keypair data is invalid")


class InvalidRequest(Invalid):
    message = _("The request is invalid.")


class InvalidInput(Invalid):
    message = _("Invalid input received") + ": %(reason)s"


class InvalidVolume(Invalid):
    message = _("Invalid volume") + ": %(reason)s"


class InvalidMetadata(Invalid):
    message = _("Invalid metadata") + ": %(reason)s"


class InvalidMetadataSize(Invalid):
    message = _("Invalid metadata size") + ": %(reason)s"


class InvalidPortRange(Invalid):
    message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")


class InvalidIpProtocol(Invalid):
    message = _("Invalid IP protocol %(protocol)s.")


class InvalidContentType(Invalid):
    message = _("Invalid content type %(content_type)s.")


class InvalidCidr(Invalid):
    message = _("Invalid cidr %(cidr)s.")


class InvalidUnicodeParameter(Invalid):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")


# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = _("%(err)s")


class InvalidAggregateAction(Invalid):
    message = _("Cannot perform action '%(action)s' on aggregate "
                "%(aggregate_id)s. Reason: %(reason)s.")


class InvalidGroup(Invalid):
    message = _("Group not valid. Reason: %(reason)s")


class InvalidSortKey(Invalid):
    message = _("Sort key supplied was not valid.")


# --- Instance state / lifecycle errors --------------------------------------

class InstanceInvalidState(Invalid):
    message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
                "%(method)s while the instance is in this state.")


class InstanceNotRunning(Invalid):
    message = _("Instance %(instance_id)s is not running.")


class InstanceNotInRescueMode(Invalid):
    message = _("Instance %(instance_id)s is not in rescue mode")


class InstanceNotRescuable(Invalid):
    message = _("Instance %(instance_id)s cannot be rescued: %(reason)s")


class InstanceNotReady(Invalid):
    message = _("Instance %(instance_id)s is not ready")


class InstanceSuspendFailure(Invalid):
    message = _("Failed to suspend instance") + ": %(reason)s"


class InstanceResumeFailure(Invalid):
    message = _("Failed to resume instance: %(reason)s.")


class InstancePowerOnFailure(Invalid):
    message = _("Failed to power on instance: %(reason)s.")


class InstancePowerOffFailure(Invalid):
    message = _("Failed to power off instance: %(reason)s.")


class InstanceRebootFailure(Invalid):
    message = _("Failed to reboot instance") + ": %(reason)s"


class InstanceTerminationFailure(Invalid):
    message = _("Failed to terminate instance") + ": %(reason)s"


class InstanceDeployFailure(Invalid):
    message = _("Failed to deploy instance") + ": %(reason)s"


class ServiceUnavailable(Invalid):
    message = _("Service is unavailable at this time.")


class ComputeResourcesUnavailable(ServiceUnavailable):
    message = _("Insufficient compute resources.")


class ComputeServiceUnavailable(ServiceUnavailable):
    message = _("Compute service of %(host)s is unavailable at this time.")


class UnableToMigrateToSelf(Invalid):
    message = _("Unable to migrate instance (%(instance_id)s) "
                "to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
    """Raised when the supplied hypervisor type is not acceptable."""
    # Bug fix: the message previously read "hypervisor type of is invalid"
    # (stray "of", apparently a leftover from a removed placeholder).
    message = _("The supplied hypervisor type is invalid.")
# --- Migration target / device / image validation errors --------------------

class DestinationHypervisorTooOld(Invalid):
    message = _("The instance requires a newer hypervisor version than "
                "has been provided.")


class DestinationDiskExists(Invalid):
    message = _("The supplied disk path (%(path)s) already exists, "
                "it is expected not to exist.")


class InvalidDevicePath(Invalid):
    message = _("The supplied device path (%(path)s) is invalid.")


class DevicePathInUse(Invalid):
    message = _("The supplied device path (%(path)s) is in use.")
    code = 409


class DeviceIsBusy(Invalid):
    message = _("The supplied device (%(device)s) is busy.")


class InvalidCPUInfo(Invalid):
    message = _("Unacceptable CPU info") + ": %(reason)s"


class InvalidIpAddressError(Invalid):
    message = _("%(address)s is not a valid IP v4/6 address.")


class InvalidVLANTag(Invalid):
    message = _("VLAN tag is not appropriate for the port group "
                "%(bridge)s. Expected VLAN tag is %(tag)s, "
                "but the one associated with the port group is %(pgroup)s.")


class InvalidVLANPortGroup(Invalid):
    message = _("vSwitch which contains the port group %(bridge)s is "
                "not associated with the desired physical adapter. "
                "Expected vSwitch is %(expected)s, but the one associated "
                "is %(actual)s.")


class InvalidDiskFormat(Invalid):
    message = _("Disk format %(disk_format)s is not acceptable")


class ImageUnacceptable(Invalid):
    message = _("Image %(image_id)s is unacceptable: %(reason)s")


class InstanceUnacceptable(Invalid):
    message = _("Instance %(instance_id)s is unacceptable: %(reason)s")


class InvalidEc2Id(Invalid):
    message = _("Ec2 id %(ec2_id)s is unacceptable.")


class InvalidUUID(Invalid):
    message = _("Expected a uuid but received %(uuid)s.")


class InvalidID(Invalid):
    message = _("Invalid ID received %(id)s.")


class InvalidPeriodicTaskArg(Invalid):
    message = _("Unexpected argument for periodic task creation: %(arg)s.")


class ConstraintNotMet(NovaException):
    message = _("Constraint not met.")
    code = 412
# --- 404-family and network / IP address errors -----------------------------

class NotFound(NovaException):
    # Root of the 404-family exceptions below.
    message = _("Resource could not be found.")
    code = 404


class AgentBuildNotFound(NotFound):
    message = _("No agent-build associated with id %(id)s.")


class VolumeNotFound(NotFound):
    message = _("Volume %(volume_id)s could not be found.")


class SnapshotNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s could not be found.")


class ISCSITargetNotFoundForVolume(NotFound):
    message = _("No target id found for volume %(volume_id)s.")


class DiskNotFound(NotFound):
    message = _("No disk at %(location)s")


class VolumeDriverNotFound(NotFound):
    message = _("Could not find a handler for %(driver_type)s volume.")


class InvalidImageRef(Invalid):
    message = _("Invalid image href %(image_href)s.")


class ImageNotFound(NotFound):
    message = _("Image %(image_id)s could not be found.")


class ImageNotFoundEC2(ImageNotFound):
    message = _("Image %(image_id)s could not be found. The nova EC2 API "
                "assigns image ids dynamically when they are listed for the "
                "first time. Have you listed image ids since adding this "
                "image?")


class ProjectNotFound(NotFound):
    message = _("Project %(project_id)s could not be found.")


class StorageRepositoryNotFound(NotFound):
    message = _("Cannot find SR to read/write VDI.")


class NetworkDuplicated(NovaException):
    message = _("Network %(network_id)s is duplicated.")


class NetworkInUse(NovaException):
    message = _("Network %(network_id)s is still in use.")


class NetworkNotCreated(NovaException):
    message = _("%(req)s is required to create a network.")


class NetworkNotFound(NotFound):
    message = _("Network %(network_id)s could not be found.")


class PortNotFound(NotFound):
    message = _("Port id %(port_id)s could not be found.")


class NetworkNotFoundForBridge(NetworkNotFound):
    message = _("Network could not be found for bridge %(bridge)s")


class NetworkNotFoundForUUID(NetworkNotFound):
    message = _("Network could not be found for uuid %(uuid)s")


class NetworkNotFoundForCidr(NetworkNotFound):
    message = _("Network could not be found with cidr %(cidr)s.")


class NetworkNotFoundForInstance(NetworkNotFound):
    message = _("Network could not be found for instance %(instance_id)s.")


class NoNetworksFound(NotFound):
    message = _("No networks defined.")


class NetworkNotFoundForProject(NotFound):
    message = _("Either Network uuid %(network_uuid)s is not present or "
                "is not assigned to the project %(project_id)s.")


class DatastoreNotFound(NotFound):
    message = _("Could not find the datastore reference(s) which the VM uses.")


class PortInUse(NovaException):
    message = _("Port %(port_id)s is still in use.")


class PortNotUsable(NovaException):
    message = _("Port %(port_id)s not usable for instance %(instance)s.")


class PortNotFree(NovaException):
    message = _("No free port available for instance %(instance)s.")


class FixedIpNotFound(NotFound):
    message = _("No fixed IP associated with id %(id)s.")


class FixedIpNotFoundForAddress(FixedIpNotFound):
    message = _("Fixed ip not found for address %(address)s.")


class FixedIpNotFoundForInstance(FixedIpNotFound):
    message = _("Instance %(instance_uuid)s has zero fixed ips.")


class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
    message = _("Network host %(host)s has zero fixed ips "
                "in network %(network_id)s.")


class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
    message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")


class FixedIpNotFoundForNetwork(FixedIpNotFound):
    message = _("Fixed IP address (%(address)s) does not exist in "
                "network (%(network_uuid)s).")


class FixedIpAlreadyInUse(NovaException):
    message = _("Fixed IP address %(address)s is already in use on instance "
                "%(instance_uuid)s.")


class FixedIpAssociatedWithMultipleInstances(NovaException):
    message = _("More than one instance is associated with fixed ip address "
                "'%(address)s'.")


class FixedIpInvalid(Invalid):
    message = _("Fixed IP address %(address)s is invalid.")


class NoMoreFixedIps(NovaException):
    message = _("Zero fixed ips available.")


class NoFixedIpsDefined(NotFound):
    message = _("Zero fixed ips could be found.")


#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
    pass


class FloatingIpExists(Duplicate):
    message = _("Floating ip %(address)s already exists.")


class FloatingIpNotFound(NotFound):
    message = _("Floating ip not found for id %(id)s.")


class FloatingIpDNSExists(Invalid):
    message = _("The DNS entry %(name)s already exists in domain %(domain)s.")


class FloatingIpNotFoundForAddress(FloatingIpNotFound):
    message = _("Floating ip not found for address %(address)s.")


class FloatingIpNotFoundForHost(FloatingIpNotFound):
    message = _("Floating ip not found for host %(host)s.")


class FloatingIpMultipleFoundForAddress(NovaException):
    message = _("Multiple floating ips are found for address %(address)s.")


class FloatingIpPoolNotFound(NotFound):
    message = _("Floating ip pool not found.")
    safe = True


class NoMoreFloatingIps(FloatingIpNotFound):
    message = _("Zero floating ips available.")
    safe = True


class FloatingIpAssociated(NovaException):
    message = _("Floating ip %(address)s is associated.")


class FloatingIpNotAssociated(NovaException):
    message = _("Floating ip %(address)s is not associated.")


class NoFloatingIpsDefined(NotFound):
    message = _("Zero floating ips exist.")


class NoFloatingIpInterface(NotFound):
    message = _("Interface %(interface)s not found.")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
    """Raised when disassociating an auto-assigned floating IP is attempted."""
    # Typo fix in the user-facing message: "assigined" -> "assigned".
    message = _("Cannot disassociate auto assigned floating ip")
# --- Keypair / service / quota / security-group errors ----------------------

class KeypairNotFound(NotFound):
    message = _("Keypair %(name)s not found for user %(user_id)s")


class CertificateNotFound(NotFound):
    message = _("Certificate %(certificate_id)s not found.")


class ServiceNotFound(NotFound):
    message = _("Service %(service_id)s could not be found.")


class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")


class ComputeHostNotFound(HostNotFound):
    message = _("Compute host %(host)s could not be found.")


class HostBinaryNotFound(NotFound):
    message = _("Could not find binary %(binary)s on host %(host)s.")


class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")


class InvalidQuotaValue(Invalid):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")


class QuotaNotFound(NotFound):
    message = _("Quota could not be found")


class QuotaResourceUnknown(QuotaNotFound):
    message = _("Unknown quota resources %(unknown)s.")


class ProjectQuotaNotFound(QuotaNotFound):
    message = _("Quota for project %(project_id)s could not be found.")


class QuotaClassNotFound(QuotaNotFound):
    message = _("Quota class %(class_name)s could not be found.")


class QuotaUsageNotFound(QuotaNotFound):
    message = _("Quota usage for project %(project_id)s could not be found.")


class ReservationNotFound(QuotaNotFound):
    message = _("Quota reservation %(uuid)s could not be found.")


class OverQuota(NovaException):
    message = _("Quota exceeded for resources: %(overs)s")


class SecurityGroupNotFound(NotFound):
    message = _("Security group %(security_group_id)s not found.")


class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
    message = _("Security group %(security_group_id)s not found "
                "for project %(project_id)s.")


class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
    message = _("Security group with rule %(rule_id)s not found.")


class SecurityGroupExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is already associated"
                " with the instance %(instance_id)s")


class SecurityGroupNotExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is not associated with"
                " the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
    """Raised when a security group default rule cannot be found."""
    # Bug fix: the placeholder was malformed ("(%rule_id)s"), which raises
    # ValueError during %-formatting instead of interpolating rule_id.
    message = _("Security group default rule (%(rule_id)s) not found.")
# --- Console / flavor / cell / metadata errors ------------------------------

class SecurityGroupCannotBeApplied(Invalid):
    message = _("Network requires port_security_enabled and subnet associated"
                " in order to apply security groups.")


class NoUniqueMatch(NovaException):
    message = _("No Unique Match Found.")
    code = 409


class MigrationNotFound(NotFound):
    message = _("Migration %(migration_id)s could not be found.")


class MigrationNotFoundByStatus(MigrationNotFound):
    message = _("Migration not found for instance %(instance_id)s "
                "with status %(status)s.")


class ConsolePoolNotFound(NotFound):
    message = _("Console pool %(pool_id)s could not be found.")


class ConsolePoolNotFoundForHostType(NotFound):
    message = _("Console pool of type %(console_type)s "
                "for compute host %(compute_host)s "
                "on proxy host %(host)s not found.")


class ConsoleNotFound(NotFound):
    message = _("Console %(console_id)s could not be found.")


class ConsoleNotFoundForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s could not be found.")


class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s "
                "in pool %(pool_id)s could not be found.")


class ConsoleTypeInvalid(Invalid):
    message = _("Invalid console type %(console_type)s")


class InstanceTypeNotFound(NotFound):
    message = _("Instance type %(instance_type_id)s could not be found.")


class InstanceTypeNotFoundByName(InstanceTypeNotFound):
    message = _("Instance type with name %(instance_type_name)s "
                "could not be found.")


class FlavorNotFound(NotFound):
    message = _("Flavor %(flavor_id)s could not be found.")


class FlavorAccessNotFound(NotFound):
    message = _("Flavor access not found for %(flavor_id)s / "
                "%(project_id)s combination.")


class CellNotFound(NotFound):
    message = _("Cell %(cell_name)s doesn't exist.")


class CellRoutingInconsistency(NovaException):
    message = _("Inconsistency in cell routing: %(reason)s")


class CellServiceAPIMethodNotFound(NotFound):
    message = _("Service API method not found: %(detail)s")


class CellTimeout(NotFound):
    message = _("Timeout waiting for response from cell")


class CellMaxHopCountReached(NovaException):
    message = _("Cell message has reached maximum hop count: %(hop_count)s")


class NoCellsAvailable(NovaException):
    message = _("No cells available matching scheduling criteria.")


class CellError(NovaException):
    message = _("Exception received during cell processing: %(exc_name)s.")


class InstanceUnknownCell(NotFound):
    message = _("Cell is not known for instance %(instance_uuid)s")


class SchedulerHostFilterNotFound(NotFound):
    message = _("Scheduler Host Filter %(filter_name)s could not be found.")


class InstanceMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no metadata with "
                "key %(metadata_key)s.")


class InstanceSystemMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no system metadata with "
                "key %(metadata_key)s.")


class InstanceTypeExtraSpecsNotFound(NotFound):
    message = _("Instance Type %(instance_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")


class FileNotFound(NotFound):
    message = _("File %(file_path)s could not be found.")


class NoFilesFound(NotFound):
    message = _("Zero files could be found.")


class SwitchNotFoundForNetworkAdapter(NotFound):
    message = _("Virtual switch associated with the "
                "network adapter %(adapter)s not found.")


class NetworkAdapterNotFound(NotFound):
    message = _("Network adapter %(adapter)s could not be found.")


class ClassNotFound(NotFound):
    message = _("Class %(class_name)s could not be found: %(exception)s")


class NotAllowed(NovaException):
    message = _("Action not allowed.")


class ImageRotationNotAllowed(NovaException):
    message = _("Rotation is not allowed for snapshots")


class RotationRequiredForBackup(NovaException):
    message = _("Rotation param is required for backup image_type")


class KeyPairExists(Duplicate):
    message = _("Key pair %(key_name)s already exists.")


class InstanceExists(Duplicate):
    message = _("Instance %(name)s already exists.")


class InstanceTypeExists(Duplicate):
    message = _("Instance Type with name %(name)s already exists.")


class InstanceTypeIdExists(Duplicate):
    message = _("Instance Type with ID %(flavor_id)s already exists.")
class FlavorAccessExists(Duplicate):
    """Raised when creating a duplicate flavor-access grant."""
    # Typo fix in the user-facing message: "alreay" -> "already".
    message = _("Flavor access already exists for flavor %(flavor_id)s "
                "and project %(project_id)s combination.")
# --- Storage / resize / quota-limit / aggregate errors ----------------------

class InvalidSharedStorage(NovaException):
    message = _("%(path)s is not on shared storage: %(reason)s")


class InvalidLocalStorage(NovaException):
    message = _("%(path)s is not on local storage: %(reason)s")


class MigrationError(NovaException):
    message = _("Migration error") + ": %(reason)s"


class MalformedRequestBody(NovaException):
    message = _("Malformed message body: %(reason)s")


# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
    message = _("Could not find config at %(path)s")


class PasteAppNotFound(NovaException):
    message = _("Could not load paste app '%(name)s' from %(path)s")


class CannotResizeToSameFlavor(NovaException):
    message = _("When resizing, instances must change flavor!")


class ResizeError(NovaException):
    message = _("Resize error: %(reason)s")


class ImageTooLarge(NovaException):
    message = _("Image is larger than instance type allows")


class InstanceTypeMemoryTooSmall(NovaException):
    message = _("Instance type's memory is too small for requested image.")


class InstanceTypeDiskTooSmall(NovaException):
    message = _("Instance type's disk is too small for requested image.")


class InsufficientFreeMemory(NovaException):
    message = _("Insufficient free memory on compute node to start %(uuid)s.")


class CouldNotFetchMetrics(NovaException):
    message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")


class NoValidHost(NovaException):
    message = _("No valid host was found. %(reason)s")


class QuotaError(NovaException):
    # Root of the 413 "over quota" family; safe for end-user display.
    message = _("Quota exceeded") + ": code=%(code)s"
    code = 413
    headers = {'Retry-After': 0}
    safe = True


class TooManyInstances(QuotaError):
    message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
                " but already used %(used)d of %(allowed)d %(resource)s")


class FloatingIpLimitExceeded(QuotaError):
    message = _("Maximum number of floating ips exceeded")


class FixedIpLimitExceeded(QuotaError):
    message = _("Maximum number of fixed ips exceeded")


class MetadataLimitExceeded(QuotaError):
    message = _("Maximum number of metadata items exceeds %(allowed)d")


class OnsetFileLimitExceeded(QuotaError):
    message = _("Personality file limit exceeded")


class OnsetFilePathLimitExceeded(QuotaError):
    message = _("Personality file path too long")


class OnsetFileContentLimitExceeded(QuotaError):
    message = _("Personality file content too long")


class KeypairLimitExceeded(QuotaError):
    message = _("Maximum number of key pairs exceeded")


class SecurityGroupLimitExceeded(QuotaError):
    message = _("Maximum number of security groups or rules exceeded")


class AggregateError(NovaException):
    message = _("Aggregate %(aggregate_id)s: action '%(action)s' "
                "caused an error: %(reason)s.")


class AggregateNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s could not be found.")


class AggregateNameExists(Duplicate):
    message = _("Aggregate %(aggregate_name)s already exists.")


class AggregateHostNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s has no host %(host)s.")


class AggregateMetadataNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s has no metadata with "
                "key %(metadata_key)s.")


class AggregateHostExists(Duplicate):
    message = _("Aggregate %(aggregate_id)s already has host %(host)s.")


class InstanceTypeCreateFailed(NovaException):
    message = _("Unable to create instance type")


class InstancePasswordSetFailed(NovaException):
    message = _("Failed to set admin password on %(instance)s "
                "because %(reason)s")
    safe = True
class DuplicateVlan(Duplicate):
message = _("Detected existing vlan with id %(vlan)d")
class CidrConflict(NovaException):
message = _("There was a conflict when trying to complete your request.")
code = 409
class InstanceNotFound(NotFound):
message = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
message = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class NodeNotFound(NotFound):
message = _("Node %(node_id)s could not be found.")
class NodeNotFoundByUUID(NotFound):
message = _("Node with UUID %(node_uuid)s could not be found.")
class MarkerNotFound(NotFound):
message = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
message = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(NovaException):
message = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
message = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
message = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
message = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
message = _("Instance %(instance_uuid)s is locked")
class ConfigDriveMountFailed(NovaException):
message = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
message = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
message = _("Failed to attach network adapter device to %(instance)s")
class InterfaceDetachFailed(Invalid):
message = _("Failed to detach network adapter device from %(instance)s")
class InstanceUserDataTooLarge(NovaException):
message = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
message = _("User data needs to be valid base 64.")
class UnexpectedTaskStateError(NovaException):
message = _("unexpected task state: expecting %(expected)s but "
"the actual state is %(actual)s")
class InstanceActionNotFound(NovaException):
message = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
message = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
message = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
message = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
message = _('Instance recreate is not implemented by this virt driver.')
class ServiceGroupUnavailable(NovaException):
message = _("The service from servicegroup driver %(driver) is "
"temporarily unavailable.")
class DBNotAllowed(NovaException):
message = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
message = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
message = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(NovaException):
message = _("Invalid Base 64 data for file %(path)s")
``` |
[
{
"content": "Repeat the code exactly:\n```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Binary Indexed Tree\n -------------------\n\n Based on the proposed C/C++ implementation by\n Yannis Chatzimichos @ https://git.softlab.ntua.gr/public/pdp-camp/blob/master/2013/advanced_data_structures.pdf\n\n ... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Binary Indexed Tree\n -------------------\n\n Based on the proposed C/C++ implementation by\n Yannis Chatzimichos @ https://git.softlab.ntua.gr/public/pdp-camp/blob/master/2013/advanced_data_struct... | ```python
# -*- coding: utf-8 -*-
"""
Binary Indexed Tree
-------------------
Based on the proposed C/C++ implementation by
Yannis Chatzimichos @ https://git.softlab.ntua.gr/public/pdp-camp/blob/master/2013/advanced_data_structures.pdf
Supports the operations
- ADD S X
Add X to the sum at position S
- SUM X Y
Finds the sum from position X to position Y
The binary indexed tree is 1-indexed.
Time Complexity
All the operations cost O( logn ), where n is the number of bits of the position.
"""
class BIT:
def __init__(self, n):
self.bit = (n + 1) * [0] # Position 0 is not used.
self.n = n
def add(self, pos, x):
while pos <= self.n:
self.bit[pos] += x
pos += (pos & -pos)
def sum(self, x, y):
sy = self.sumFromOne(y)
return sy if x == 1 else sy - self.sumFromOne(x)
def sumFromOne(self, pos):
sum = 0
while pos > 0:
sum += self.bit[pos]
pos -= (pos & -pos)
return sum
if __name__ == "__main__":
xs = [7,0,3,2,3,0,0,4,6,3,2,8]
n = len(xs)
bit = BIT(n)
for i in range(1, n+1):
bit.add(i, xs[i-1])
assert ([bit.sum(1, x) for x in range(1, n+1)]) == [sum(xs[0:n]) for n in range(1, n+1)]
bit.add(5, 3)
assert bit.sum(2, n) == sum(xs[2:]) + 3
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\nfrom __future__ import absolute_import, unicode_literals\n\nfrom django.core.urlresolvers import reverse\nfrom django.test.utils import override_settings\nfrom django.utils import timezone\nfrom mock import patch\nfrom temba.types import Broadcast\n... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nfrom __future__ import absolute_import, unicode_literals\n\nfrom django.core.urlresolvers import reverse\nfrom django.test.utils import override_settings\nfrom django.utils import timezone\nfrom mock import patch\nfrom temba.types im... | ```python
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.utils import timezone
from mock import patch
from temba.types import Broadcast
from tracpro.msgs.models import Message, COHORT_ALL, COHORT_RESPONDENTS, COHORT_NONRESPONDENTS
from tracpro.polls.models import Issue, Response, RESPONSE_COMPLETE, RESPONSE_PARTIAL, RESPONSE_EMPTY
from tracpro.test import TracProTest
class MessageTest(TracProTest):
@override_settings(CELERY_ALWAYS_EAGER=True, CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, BROKER_BACKEND='memory')
@patch('dash.orgs.models.TembaClient.create_broadcast')
def test_create(self, mock_create_broadcast):
mock_create_broadcast.return_value = Broadcast.create()
now = timezone.now()
# create non-regional issue with 3 responses (1 complete, 1 partial, 1 empty)
issue1 = Issue.objects.create(poll=self.poll1, region=None, conducted_on=timezone.now())
Response.objects.create(flow_run_id=123, issue=issue1, contact=self.contact1,
created_on=now, updated_on=now, status=RESPONSE_COMPLETE)
Response.objects.create(flow_run_id=234, issue=issue1, contact=self.contact2,
created_on=now, updated_on=now, status=RESPONSE_PARTIAL)
Response.objects.create(flow_run_id=345, issue=issue1, contact=self.contact4,
created_on=now, updated_on=now, status=RESPONSE_EMPTY)
msg1 = Message.create(self.unicef, self.admin, "Test #1", issue1, COHORT_ALL, None)
self.assertEqual(msg1.org, self.unicef)
self.assertEqual(msg1.sent_by, self.admin)
self.assertIsNotNone(msg1.sent_on)
self.assertEqual(msg1.text, "Test #1")
self.assertEqual(msg1.issue, issue1)
self.assertEqual(msg1.cohort, COHORT_ALL)
self.assertEqual(msg1.region, None)
self.assertEqual(list(msg1.recipients.order_by('pk')), [self.contact1, self.contact2, self.contact4])
self.assertEqual(unicode(msg1), "Test #1")
self.assertEqual(msg1.as_json(), dict(id=msg1.pk, recipients=3))
msg2 = Message.create(self.unicef, self.admin, "Test #1", issue1, COHORT_RESPONDENTS, None)
self.assertEqual(msg2.cohort, COHORT_RESPONDENTS)
self.assertEqual(msg2.region, None)
self.assertEqual(list(msg2.recipients.order_by('pk')), [self.contact1])
msg3 = Message.create(self.unicef, self.admin, "Test #1", issue1, COHORT_NONRESPONDENTS, None)
self.assertEqual(msg3.cohort, COHORT_NONRESPONDENTS)
self.assertEqual(msg3.region, None)
self.assertEqual(list(msg3.recipients.order_by('pk')), [self.contact2, self.contact4])
msg4 = Message.create(self.unicef, self.admin, "Test #1", issue1, COHORT_ALL, self.region1)
self.assertEqual(msg4.cohort, COHORT_ALL)
self.assertEqual(msg4.region, self.region1)
self.assertEqual(list(msg4.recipients.order_by('pk')), [self.contact1, self.contact2])
class MessageCRUDLTest(TracProTest):
def test_list(self):
url = reverse('msgs.message_list')
# create a non-regional issue
issue1 = Issue.objects.create(poll=self.poll1, region=None, conducted_on=timezone.now())
# send 1 message to all regions and 2 more to specific regions
msg1 = Message.create(self.unicef, self.admin, "Test to all", issue1, COHORT_ALL, None)
msg2 = Message.create(self.unicef, self.admin, "Test to region #1", issue1, COHORT_ALL, self.region1)
msg3 = Message.create(self.unicef, self.admin, "Test to region #2", issue1, COHORT_ALL, self.region2)
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(list(response.context['object_list']), [msg3, msg2, msg1])
self.switch_region(self.region1)
# should still include message sent to all regions
response = self.url_get('unicef', url)
self.assertEqual(list(response.context['object_list']), [msg2, msg1])
``` |
[
{
"content": "Here is the snippet:\n```python\n#!user/bin/env python\n# -*- coding: utf8 -*- \n'''\nCreated on Jul 11, 2012\n\n@author: joseph\n'''\nimport urllib,urllib2\nimport mimetypes\n\n\ndef uploadfile(fields, files):\n BOUNDARY = '----------267402204411258'\n CRLF = '\\r\\n'\n L = []\n for (... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#!user/bin/env python\n# -*- coding: utf8 -*- \n'''\nCreated on Jul 11, 2012\n\n@author: joseph\n'''\nimport urllib,urllib2\nimport mimetypes\n\n\ndef uploadfile(fields, files):\n BOUNDARY = '----------267402204411258'\n CRLF = '\\r\\n'\n L... | ```python
#!user/bin/env python
# -*- coding: utf8 -*-
'''
Created on Jul 11, 2012
@author: joseph
'''
import urllib,urllib2
import mimetypes
def uploadfile(fields, files):
BOUNDARY = '----------267402204411258'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % mimetypes.guess_type(filename)[0] or 'application/octet-stream')
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
if __name__ == '__main__':
fields=[
('_xsrf','28d55624808042768af23188e318500a')
]
ifile = "/home/joseph/Pictures/1.jpg"
imgdata= file(ifile,"rb")
files=[
('ifile',imgdata.name,imgdata.read())
]
content_type, upload_data = uploadfile(fields, files)
uploadheader={
"User-Agent": "Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
'Content-Type': content_type,
'Content-Length': str(len(upload_data))
}
request = urllib2.Request("http://localhost/upload/", upload_data, uploadheader)
res = urllib2.urlopen(request)
print res.read()
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n# Copyright 2019 The LUCI Authors. All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\n\"\"\"This file is a recipe demonstrating reading trigger... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# Copyright 2019 The LUCI Authors. All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\n\"\"\"This file is a recipe demonstrating... | ```python
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""This file is a recipe demonstrating reading triggers of the current build."""
import json
from google.protobuf import json_format
from PB.go.chromium.org.luci.scheduler.api.scheduler.v1 import (
triggers as triggers_pb2)
DEPS = [
'scheduler',
'step',
]
def RunSteps(api):
pres = api.step(name='triggers', cmd=None).presentation
pres.logs['triggers'] = json.dumps(
[json_format.MessageToDict(t) for t in api.scheduler.triggers],
sort_keys=True,
indent=2,
).splitlines()
if api.scheduler.triggers and api.scheduler.triggers[0].gitiles.repo:
pres.logs['first_repo'] = [api.scheduler.triggers[0].gitiles.repo]
def GenTests(api):
yield (
api.test('unset')
)
yield (
api.test('gitiles') +
api.scheduler(
triggers=[
triggers_pb2.Trigger(
id='a',
gitiles=dict(
repo='https://chromium.googlesource.com/chomium/src',
ref='refs/heads/main',
revision='a' * 40,
),
),
triggers_pb2.Trigger(
id='b',
gitiles=dict(
repo='https://chromium.googlesource.com/chomium/src',
ref='refs/heads/main',
revision='b' * 40,
),
),
],
)
)
bb_trigger = triggers_pb2.BuildbucketTrigger(tags=['a:b'])
bb_trigger.properties.update({'foo': 'bar'})
yield (
api.test('various') +
api.scheduler(
triggers=[
triggers_pb2.Trigger(id='a', cron=dict(generation=123)),
triggers_pb2.Trigger(id='b', webui=dict()),
triggers_pb2.Trigger(id='c', buildbucket=bb_trigger),
],
)
)
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport falcon\nimport pytest\nfrom falcon import testing\nfrom falcon.cmd.print_routes import print_routes\n\nfrom falcon_resourc... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport falcon\nimport pytest\nfrom falcon import testing\nfrom falcon.cmd.print_routes import print_routes\n\nfro... | ```python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import falcon
import pytest
from falcon import testing
from falcon.cmd.print_routes import print_routes
from falcon_resource_factory import ResourceFactory
def detail_view(resource, req, res, **kwargs):
res.body = '{0} Called'.format(req.method)
def list_view(resource, req, res, **kwargs):
res.body = '{0} list Called'.format(req.method)
def resource_creator(detail_methods, list_methods,
detail_method_map=ResourceFactory.DETAIL_METHOD_MAP,
list_method_map=ResourceFactory.LIST_METHOD_MAP,
custom_views=None, params=None):
test_resource = type('TestResource', (), params or {})
for method in detail_methods:
func_name = detail_method_map[method]
setattr(test_resource, func_name, detail_view)
for method in list_methods:
func_name = list_method_map[method]
setattr(test_resource, func_name, list_view)
return test_resource()
@pytest.fixture()
def app():
return falcon.API()
@pytest.fixture
def client(app):
return testing.TestClient(app)
@pytest.fixture()
def resource_factory():
return ResourceFactory()
def _test_detail_routes(app, client, expected_params):
resource, method_map, params, uri_template = app._router.find('/items/5')
assert expected_params == params
assert resource.__class__.__name__ == 'TestResourceDetail'
assert hasattr(resource, 'on_get')
assert hasattr(resource, 'on_post')
response = client.simulate_get('/items/5')
assert response.content.lower() == b'get called'
response = client.simulate_post('/items/5')
assert response.content.lower() == b'post called'
def test_detail_routes(app, resource_factory, client):
res = resource_creator(['GET', 'POST'], [])
resource_factory.add_routes(app, '/items/', res)
expected_params = {'id': '5'}
_test_detail_routes(app, client, expected_params)
def test_detail_routes_custom_identifier(app, client):
resource_factory = ResourceFactory(detail_identifier='uuid')
res = resource_creator(['GET', 'POST'], [])
resource_factory.add_routes(app, '/items/', res)
expected_params = {'uuid': '5'}
_test_detail_routes(app, client, expected_params)
def test_detail_routes_custom_method_map(app, client):
detail_method_map = {
'GET': 'obj_get',
'POST': 'obj_post'
}
resource_factory = ResourceFactory(detail_method_map=detail_method_map)
res = resource_creator(detail_method_map.keys(), [],
detail_method_map=detail_method_map)
resource_factory.add_routes(app, '/items/', res)
expected_params = {'id': '5'}
_test_detail_routes(app, client, expected_params)
def _test_list_routes(app, client):
resource, method_map, params, uri_template = app._router.find('/items')
assert hasattr(resource, 'on_get')
assert hasattr(resource, 'on_put')
assert resource.__class__.__name__ == 'TestResourceList'
response = client.simulate_get('/items/')
assert response.content.lower() == b'get list called'
response = client.simulate_put('/items/')
assert response.content.lower() == b'put list called'
def test_list_routes(app, resource_factory, client):
res = resource_creator([], ['GET', 'PUT'])
resource_factory.add_routes(app, '/items/', res)
_test_list_routes(app, client)
def test_list_routes_custom_method_map(app, client):
list_method_map = {
'GET': 'obj_get_list',
'PUT': 'obj_put_list'
}
resource_factory = ResourceFactory(list_method_map=list_method_map)
res = resource_creator([], list_method_map.keys(),
list_method_map=list_method_map)
resource_factory.add_routes(app, '/items/', res)
_test_list_routes(app, client)
def test_generated_resources_has_params(app, resource_factory, client):
const_parmas = {
'PARAM_1': '1',
'PARAM_2': '2',
}
hidden_params = {
'__x': 'hidden',
'func': lambda: None
}
params = dict(const_parmas)
params.update(dict(hidden_params))
res = resource_creator(['GET'], ['GET'], params=params)
resource_factory.add_routes(app, '/items/', res)
list_resource, _, _, _ = app._router.find('/items')
list_resource_cls = list_resource.__class__
for key, val in const_parmas.items():
assert getattr(list_resource_cls, key) == val
for key in hidden_params.keys():
assert not hasattr(list_resource_cls, key)
``` |
[
{
"content": "```python\nimport sys\nimport os.path\nfrom PIL import Image\nimport numpy as np\nfrom arg_parser import parse_args\n\nfrom printer import print_header, print_usage, print_line\n\ndef init_images(app_data):\n print(\"[init.py] : initializing images...\")\n\n app_args = app_data['app_args']\n... | [
{
"content": "<|memory_start|>```python\nimport sys\nimport os.path\nfrom PIL import Image\nimport numpy as np\nfrom arg_parser import parse_args\n\nfrom printer import print_header, print_usage, print_line\n\ndef init_images(app_data):\n print(\"[init.py] : initializing images...\")\n\n app_args = app_da... | ```python
import sys
import os.path
from PIL import Image
import numpy as np
from arg_parser import parse_args
from printer import print_header, print_usage, print_line
def init_images(app_data):
print("[init.py] : initializing images...")
app_args = app_data['app_args']
# input image:
img_path = app_args.img_file
image = np.array(Image.open(img_path))
img_path2 = app_args.alpha_file
alpha = np.array(Image.open(img_path2))
if image.shape[0] != alpha.shape[0] or image.shape[1] != alpha.shape[1]:
print("Please use alpha image with the same shape as the image")
sys.exit(0)
R = image.shape[0]
C = image.shape[1]
image_flip = np.rollaxis(image, 2)
# add alpha channel to image along with other colour channels
imgalpha = np.append(image_flip, alpha)
imgalpha = imgalpha.reshape(4, R, C)
imgalpha_region = imgalpha[0:4, 0:R, 0:C]
# add ghost region
imgalpha_ghost = np.empty((4, R+2, C+2), np.float32)
imgalpha_ghost[0:4, 1:R+1, 1:C+1] = imgalpha_region
# convert input image to floating point
imgalpha_f = np.float32(imgalpha_ghost) / 255.0
# result array
res = np.empty((3, R, C), np.float32)
img_data = {}
img_data['IN'] = imgalpha_f
img_data['OUT'] = res
app_data['img_data'] = img_data
app_data['R'] = R
app_data['C'] = C
return
def get_input(app_data):
# parse the command-line arguments
app_args = parse_args()
app_data['app_args'] = app_args
app_data['mode'] = app_args.mode
app_data['runs'] = int(app_args.runs)
app_data['graph_gen'] = bool(app_args.graph_gen)
app_data['timer'] = app_args.timer
# storage optimization
app_data['optimize_storage'] = bool(app_args.optimize_storage)
# early freeing of allocated arrays
app_data['early_free'] = bool(app_args.early_free)
# pool allocate option
app_data['pool_alloc'] = bool(app_args.pool_alloc)
return
def init_all(app_data):
pipe_data = {}
app_data['pipe_data'] = pipe_data
get_input(app_data)
init_images(app_data)
return
``` |
[
{
"content": "Repeat the following code:\n```python\n# Guillaume Valadon <guillaume@valadon.net>\n\n\"\"\"\nScapy *BSD native support - BPF sockets\n\"\"\"\n\nfrom ctypes import c_long, sizeof\nimport errno\nimport fcntl\nimport os\nimport platform\nfrom select import select\nimport struct\nimport time\n\nfrom ... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# Guillaume Valadon <guillaume@valadon.net>\n\n\"\"\"\nScapy *BSD native support - BPF sockets\n\"\"\"\n\nfrom ctypes import c_long, sizeof\nimport errno\nimport fcntl\nimport os\nimport platform\nfrom select import select\nimport struct\nimpo... | ```python
# Guillaume Valadon <guillaume@valadon.net>
"""
Scapy *BSD native support - BPF sockets
"""
from ctypes import c_long, sizeof
import errno
import fcntl
import os
import platform
from select import select
import struct
import time
from scapy.arch.bpf.core import get_dev_bpf, attach_filter
from scapy.arch.bpf.consts import BIOCGBLEN, BIOCGDLT, BIOCGSTATS, \
BIOCIMMEDIATE, BIOCPROMISC, BIOCSBLEN, BIOCSETIF, BIOCSHDRCMPLT, \
BPF_BUFFER_LENGTH, BIOCSDLT, DLT_IEEE802_11_RADIO
from scapy.config import conf
from scapy.consts import FREEBSD, NETBSD, DARWIN
from scapy.data import ETH_P_ALL
from scapy.error import Scapy_Exception, warning
from scapy.interfaces import network_name
from scapy.supersocket import SuperSocket
from scapy.compat import raw
from scapy.layers.l2 import Loopback
if FREEBSD:
# On 32bit architectures long might be 32bit.
BPF_ALIGNMENT = sizeof(c_long)
elif NETBSD:
BPF_ALIGNMENT = 8 # sizeof(long)
else:
BPF_ALIGNMENT = 4 # sizeof(int32_t)
# SuperSockets definitions
class _L2bpfSocket(SuperSocket):
""""Generic Scapy BPF Super Socket"""
desc = "read/write packets using BPF"
nonblocking_socket = True
def __init__(self, iface=None, type=ETH_P_ALL, promisc=None, filter=None,
nofilter=0, monitor=False):
self.fd_flags = None
self.assigned_interface = None
# SuperSocket mandatory variables
if promisc is None:
self.promisc = conf.sniff_promisc
else:
self.promisc = promisc
self.iface = network_name(iface or conf.iface)
# Get the BPF handle
self.ins = None
(self.ins, self.dev_bpf) = get_dev_bpf()
self.outs = self.ins
# Set the BPF buffer length
try:
fcntl.ioctl(self.ins, BIOCSBLEN, struct.pack('I', BPF_BUFFER_LENGTH)) # noqa: E501
except IOError:
raise Scapy_Exception("BIOCSBLEN failed on /dev/bpf%i" %
self.dev_bpf)
# Assign the network interface to the BPF handle
try:
fcntl.ioctl(self.ins, BIOCSETIF, struct.pack("16s16x", self.iface.encode())) # noqa: E501
except IOError:
raise Scapy_Exception("BIOCSETIF failed on %s" % self.iface)
self.assigned_interface = self.iface
# Set the interface into promiscuous
if self.promisc:
self.set_promisc(1)
# Set the interface to monitor mode
# Note: - trick from libpcap/pcap-bpf.c - monitor_mode()
# - it only works on OS X 10.5 and later
if DARWIN and monitor:
dlt_radiotap = struct.pack('I', DLT_IEEE802_11_RADIO)
try:
fcntl.ioctl(self.ins, BIOCSDLT, dlt_radiotap)
except IOError:
raise Scapy_Exception("Can't set %s into monitor mode!" %
self.iface)
# Don't block on read
try:
fcntl.ioctl(self.ins, BIOCIMMEDIATE, struct.pack('I', 1))
except IOError:
raise Scapy_Exception("BIOCIMMEDIATE failed on /dev/bpf%i" %
self.dev_bpf)
# Scapy will provide the link layer source address
# Otherwise, it is written by the kernel
try:
fcntl.ioctl(self.ins, BIOCSHDRCMPLT, struct.pack('i', 1))
except IOError:
raise Scapy_Exception("BIOCSHDRCMPLT failed on /dev/bpf%i" %
self.dev_bpf)
# Configure the BPF filter
filter_attached = False
if not nofilter:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if filter is not None:
try:
attach_filter(self.ins, filter, self.iface)
filter_attached = True
except ImportError as ex:
warning("Cannot set filter: %s" % ex)
if NETBSD and filter_attached is False:
# On NetBSD, a filter must be attached to an interface, otherwise
# no frame will be received by os.read(). When no filter has been
# configured, Scapy uses a simple tcpdump filter that does nothing
# more than ensuring the length frame is not null.
filter = "greater 0"
try:
attach_filter(self.ins, filter, self.iface)
except ImportError as ex:
warning("Cannot set filter: %s" % ex)
# Set the guessed packet class
self.guessed_cls = self.guess_cls()
def set_promisc(self, value):
"""Set the interface in promiscuous mode"""
try:
fcntl.ioctl(self.ins, BIOCPROMISC, struct.pack('i', value))
except IOError:
raise Scapy_Exception("Cannot set promiscuous mode on interface "
"(%s)!" % self.iface)
def __del__(self):
"""Close the file descriptor on delete"""
# When the socket is deleted on Scapy exits, __del__ is
# sometimes called "too late", and self is None
if self is not None:
self.close()
def guess_cls(self):
"""Guess the packet class that must be used on the interface"""
# Get the data link type
try:
ret = fcntl.ioctl(self.ins, BIOCGDLT, struct.pack('I', 0))
ret = struct.unpack('I', ret)[0]
except IOError:
cls = conf.default_l2
warning("BIOCGDLT failed: unable to guess type. Using %s !",
cls.name)
return cls
# Retrieve the corresponding class
try:
return conf.l2types[ret]
except KeyError:
cls = conf.default_l2
warning("Unable to guess type (type %i). Using %s", ret, cls.name)
def set_nonblock(self, set_flag=True):
"""Set the non blocking flag on the socket"""
# Get the current flags
if self.fd_flags is None:
try:
self.fd_flags = fcntl.fcntl(self.ins, fcntl.F_GETFL)
except IOError:
warning("Cannot get flags on this file descriptor !")
return
# Set the non blocking flag
if set_flag:
new_fd_flags = self.fd_flags | os.O_NONBLOCK
else:
new_fd_flags = self.fd_flags & ~os.O_NONBLOCK
try:
fcntl.fcntl(self.ins, fcntl.F_SETFL, new_fd_flags)
self.fd_flags = new_fd_flags
except Exception:
warning("Can't set flags on this file descriptor !")
def get_stats(self):
"""Get received / dropped statistics"""
try:
ret = fcntl.ioctl(self.ins, BIOCGSTATS, struct.pack("2I", 0, 0))
return struct.unpack("2I", ret)
except IOError:
warning("Unable to get stats from BPF !")
return (None, None)
def get_blen(self):
"""Get the BPF buffer length"""
try:
ret = fcntl.ioctl(self.ins, BIOCGBLEN, struct.pack("I", 0))
return struct.unpack("I", ret)[0]
except IOError:
warning("Unable to get the BPF buffer length")
return
def fileno(self):
"""Get the underlying file descriptor"""
return self.ins
def close(self):
"""Close the Super Socket"""
if not self.closed and self.ins is not None:
os.close(self.ins)
self.closed = True
self.ins = None
def send(self, x):
"""Dummy send method"""
raise Exception(
"Can't send anything with %s" % self.__class__.__name__
)
def recv_raw(self, x=BPF_BUFFER_LENGTH):
"""Dummy recv method"""
raise Exception(
"Can't recv anything with %s" % self.__class__.__name__
)
@staticmethod
def select(sockets, remain=None):
"""This function is called during sendrecv() routine to select
the available sockets.
"""
# sockets, None (means use the socket's recv() )
return bpf_select(sockets, remain)
class L2bpfListenSocket(_L2bpfSocket):
""""Scapy L2 BPF Listen Super Socket"""
def __init__(self, *args, **kwargs):
self.received_frames = []
super(L2bpfListenSocket, self).__init__(*args, **kwargs)
def buffered_frames(self):
"""Return the number of frames in the buffer"""
return len(self.received_frames)
def get_frame(self):
"""Get a frame or packet from the received list"""
if self.received_frames:
return self.received_frames.pop(0)
else:
return None, None, None
@staticmethod
def bpf_align(bh_h, bh_c):
"""Return the index to the end of the current packet"""
# from <net/bpf.h>
return ((bh_h + bh_c) + (BPF_ALIGNMENT - 1)) & ~(BPF_ALIGNMENT - 1)
def extract_frames(self, bpf_buffer):
"""Extract all frames from the buffer and stored them in the received list.""" # noqa: E501
# Ensure that the BPF buffer contains at least the header
len_bb = len(bpf_buffer)
if len_bb < 20: # Note: 20 == sizeof(struct bfp_hdr)
return
# Extract useful information from the BPF header
if FREEBSD:
# Unless we set BIOCSTSTAMP to something different than
# BPF_T_MICROTIME, we will get bpf_hdr on FreeBSD, which means
# that we'll get a struct timeval, which is time_t, suseconds_t.
# On i386 time_t is 32bit so the bh_tstamp will only be 8 bytes.
# We really want to set BIOCSTSTAMP to BPF_T_NANOTIME and be
# done with this and it always be 16?
if platform.machine() == "i386":
# struct bpf_hdr
bh_tstamp_offset = 8
else:
# struct bpf_hdr (64bit time_t) or struct bpf_xhdr
bh_tstamp_offset = 16
elif NETBSD:
# struct bpf_hdr or struct bpf_hdr32
bh_tstamp_offset = 16
else:
# struct bpf_hdr
bh_tstamp_offset = 8
# Parse the BPF header
bh_caplen = struct.unpack('I', bpf_buffer[bh_tstamp_offset:bh_tstamp_offset + 4])[0] # noqa: E501
next_offset = bh_tstamp_offset + 4
bh_datalen = struct.unpack('I', bpf_buffer[next_offset:next_offset + 4])[0] # noqa: E501
next_offset += 4
bh_hdrlen = struct.unpack('H', bpf_buffer[next_offset:next_offset + 2])[0] # noqa: E501
if bh_datalen == 0:
return
# Get and store the Scapy object
frame_str = bpf_buffer[bh_hdrlen:bh_hdrlen + bh_caplen]
self.received_frames.append(
(self.guessed_cls, frame_str, None)
)
# Extract the next frame
end = self.bpf_align(bh_hdrlen, bh_caplen)
if (len_bb - end) >= 20:
self.extract_frames(bpf_buffer[end:])
def recv_raw(self, x=BPF_BUFFER_LENGTH):
"""Receive a frame from the network"""
x = min(x, BPF_BUFFER_LENGTH)
if self.buffered_frames():
# Get a frame from the buffer
return self.get_frame()
# Get data from BPF
try:
bpf_buffer = os.read(self.ins, x)
except EnvironmentError as exc:
if exc.errno != errno.EAGAIN:
warning("BPF recv_raw()", exc_info=True)
return None, None, None
# Extract all frames from the BPF buffer
self.extract_frames(bpf_buffer)
return self.get_frame()
class L2bpfSocket(L2bpfListenSocket):
    """Scapy L2 BPF Super Socket"""
    # NOTE: the original docstring started with four quote characters
    # (""""Scapy...), which embedded a stray leading quote in __doc__.

    def send(self, x):
        """Send a frame on the BPF descriptor.

        :param x: a packet (or bytes-convertible object)
        :returns: the number of bytes written
        """
        return os.write(self.outs, raw(x))

    def nonblock_recv(self):
        """Non blocking receive: return a packet or None if nothing is ready"""
        if self.buffered_frames():
            # Get a frame from the buffer
            return L2bpfListenSocket.recv(self)
        # Set the non blocking flag, read from the socket, and unset the flag
        self.set_nonblock(True)
        pkt = L2bpfListenSocket.recv(self)
        self.set_nonblock(False)
        return pkt
class L3bpfSocket(L2bpfSocket):
    """Scapy L3 BPF Super Socket: exchanges layer 3 payloads, adding or
    stripping the link-layer header as needed."""
    def recv(self, x=BPF_BUFFER_LENGTH):
        """Receive on layer 3: strip the L2 header and return the payload"""
        r = SuperSocket.recv(self, x)
        if r:
            # Propagate the capture timestamp from the L2 frame to its payload
            r.payload.time = r.time
            return r.payload
        return r
    def send(self, pkt):
        """Send a packet: route it, bind the interface, prepend L2 header"""
        # Use the routing table to find the output interface
        iff = pkt.route()[0]
        if iff is None:
            iff = conf.iface
        # Assign the network interface to the BPF handle (only on change,
        # to avoid a redundant ioctl per packet)
        if self.assigned_interface != iff:
            try:
                fcntl.ioctl(self.outs, BIOCSETIF, struct.pack("16s16x", iff.encode()))  # noqa: E501
            except IOError:
                raise Scapy_Exception("BIOCSETIF failed on %s" % iff)
            self.assigned_interface = iff
        # Build the frame
        #
        # LINKTYPE_NULL / DLT_NULL (Loopback) is a special case. From the
        # bpf(4) man page (from macOS/Darwin, but also for BSD):
        #
        # "A packet can be sent out on the network by writing to a bpf file
        # descriptor. [...] Currently only writes to Ethernets and SLIP links
        # are supported."
        #
        # Headers are only mentioned for reads, not writes, and it has the
        # name "NULL" and id=0.
        #
        # The _correct_ behaviour appears to be that one should add a BSD
        # Loopback header to every sent packet. This is needed by FreeBSD's
        # if_lo, and Darwin's if_lo & if_utun.
        #
        # tuntaposx appears to have interpreted "NULL" as "no headers".
        # Thankfully its interfaces have a different name (tunX) to Darwin's
        # if_utun interfaces (utunX).
        #
        # There might be other drivers which make the same mistake as
        # tuntaposx, but these are typically provided with VPN software, and
        # Apple are breaking these kexts in a future version of macOS... so
        # the problem will eventually go away. They already don't work on Macs
        # with Apple Silicon (M1).
        if DARWIN and iff.startswith('tun') and self.guessed_cls == Loopback:
            frame = raw(pkt)
        else:
            frame = raw(self.guessed_cls() / pkt)
        pkt.sent_time = time.time()
        # Send the frame
        L2bpfSocket.send(self, frame)
# Sockets manipulation functions
def isBPFSocket(obj):
    """Return True if obj is a BPF Super Socket.

    The original tuple listed L2bpfListenSocket twice and omitted
    L2bpfSocket; since both concrete sockets subclass L2bpfListenSocket
    the result was the same, but the intent is the three public types.
    """
    return isinstance(
        obj,
        (L2bpfListenSocket, L2bpfSocket, L3bpfSocket)
    )
def bpf_select(fds_list, timeout=None):
    """A call to recv() can return several frames. This function hides the
    fact that some frames are read from the internal buffer."""
    # BPF sockets that already hold buffered frames are ready by definition
    ready_buffered = []
    to_select = []
    for fd in fds_list:
        if isBPFSocket(fd) and fd.buffered_frames():
            ready_buffered.append(fd)
        else:
            # Regular file descriptor, or a BPF socket with an empty buffer
            to_select.append(fd)
    if not to_select:
        return ready_buffered
    # Poll the remaining descriptors; default to a short timeout so that
    # already-buffered frames are not held up for long
    if timeout is None:
        timeout = 0.05
    readable, _, _ = select(to_select, [], [], timeout)
    return ready_buffered + readable
``` |
[
{
"content": "Here is a code file:\n```python\n''' \nThis study is a Bogazici University - NETAS Nova V-Gate collaboration and funded by TEYDEB project \"Realization of Anomaly Detection and Prevention with Learning System Architectures, Quality Improvement, High Rate Service Availability and Rich Services in a... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n''' \nThis study is a Bogazici University - NETAS Nova V-Gate collaboration and funded by TEYDEB project \"Realization of Anomaly Detection and Prevention with Learning System Architectures, Quality Improvement, High Rate Service Availability and Ri... | ```python
'''
This study is a Bogazici University - NETAS Nova V-Gate collaboration and funded by TEYDEB project "Realization of Anomaly Detection and Prevention with Learning System Architectures, Quality Improvement, High Rate Service Availability and Rich Services in a VoIP Firewall Product'', by the Scientific and Technological Research Council Of Turkey (TUBITAK).
'''
import pcap
import threading
import time
import pickle
import numpy as np
import networking
import packet
from stats import *
class Monitor:
    ''' Monitors the network traffic either by listening to an interface or reading from a pcap/log file.
    It also sends the statistics such as packet histogram, call events and resource usage to MonitorClient.
    '''
    FRAME_LENGTH = 1 # seconds; statistics are published once per frame
    def __init__(self, interface, stats_names=(), client_address=None, real_time=True):
        '''
        :param interface: NIC name, a .pcap/.pcapng capture file, or a .pickle log file
        :param stats_names: names of the Stats collectors to instantiate
        :param client_address: (host, port) of the MonitorClient receiving statistics
        :param real_time: when replaying a pcap, sleep FRAME_LENGTH per frame
        '''
        # Interface
        self.interface = interface
        self.client_address = client_address
        self.real_time = real_time
        # Running mode is inferred from the interface name's file extension
        self.mode = 'online'
        if interface.endswith('pcapng') or interface.endswith('pcap'):
            self.mode = 'offline_pcap'
        elif interface.endswith('pickle'):
            self.mode = 'log_file'
        # Stats
        self.stats = []
        for stats_name in stats_names:
            self.stats.append(Stats.create_by_name(stats_name))
        self.pcap_stats = [0, 0, 0]  # received / dropped by kernel / dropped by iface
        # Other
        self.current_time = None
        self.running = False
        self.pcap_handle = pcap.pcapObject()
        self.file_handler = None
        self.thread = None
    def stop(self):
        '''Stops listening: clears the running flag and joins the worker thread.
        '''
        self.running = False
        if self.thread:
            self.thread.join()
        print 'Monitor: listening stopped.'
        if self.mode == 'online':
            # NOTE(review): summary() prints itself and returns None, so this
            # statement also prints a stray 'None' line -- confirm intent.
            print self.summary()
    def start_daemon(self):
        '''This creates a new thread that listens and processes the packets.
        Thus, it prevents Monitor from waiting for the end of the processing.
        '''
        self.thread = threading.Thread(target=self.start)
        self.thread.setDaemon(True)
        self.thread.start()
    def start(self):
        '''This function is used by the newly created thread.
        The processing of packets is done here.
        '''
        # Open Interface
        try:
            if self.mode == 'online':
                snap_len, promisc, to_ms = 1600, True, 100
                self.pcap_handle.open_live(self.interface, snap_len, promisc, to_ms)
            elif self.mode == 'offline_pcap':
                self.pcap_handle.open_offline(self.interface)
            elif self.mode == 'log_file':
                self.file_handler = open(self.interface,'r')
        except Exception, e:
            print 'Monitor: cannot open interface: %s\nMonitor: %s ' % (self.interface,e)
            return
        # Initialize time
        # NOTE(review): self.mode is always a non-empty string here, so this
        # always assigns 0 -- presumably time.time() was intended for the
        # 'online' mode; confirm.
        self.current_time = 0 if self.mode else time.time()
        print 'Monitor: listening on %s ...' % self.interface
        self.running = True
        # Pcap loop:
        if self.mode == 'online' or self.mode == 'offline_pcap':
            while self.running:
                self.pcap_handle.dispatch(1, self.packet_received)
            # Pcap stats:
            self.pcap_stats = self.pcap_handle.stats()
        # Log loop: replay previously recorded Stats messages at frame rate
        elif self.mode == 'log_file':
            try:
                while self.running:
                    logged_msg = pickle.load(self.file_handler)
                    message = networking.Message('Stats')
                    for stat in self.stats:
                        if stat.name in logged_msg.body:
                            message.insert(stat.name, logged_msg.body[stat.name])
                    message.sent_to(self.client_address, proto='TCP')
                    time.sleep(self.FRAME_LENGTH)
            except EOFError:
                print 'End of log file'
            except KeyError as e:
                print 'Key error: No ' + e.message + ' in input log file.'
    def summary(self):
        '''Prints the number of received and dropped packets; returns None.
        '''
        print '%d packets received by filter' % self.pcap_stats[0]
        print '%d packets dropped by kernel' % self.pcap_stats[1]
        print '%d packets dropped by interface' % self.pcap_stats[2]
    def packet_received(self, pktlen, buf, timestamp):
        '''It parses each packet and updates statistics.
        :param pktlen: packet length
        :param buf: buffer
        :param timestamp: arrival time of the packet
        '''
        # Parse packet
        pkt = packet.Packet(timestamp, buf)
        # Add to each stats collector
        for s in self.stats:
            s.add_packet(pkt)
        # Epoch passed ?
        if timestamp - self.current_time >= self.FRAME_LENGTH:
            self.current_time = timestamp
            self.publish_and_clear_stats()
        # Simulating real time ?
        if self.mode == 'offline_pcap' and self.real_time:
            time.sleep(self.FRAME_LENGTH)
    def publish_and_clear_stats(self):
        ''' Sends the statistics of packets in a time interval and clears statistics after transmission.
        '''
        # Create Message
        message = networking.Message('Stats')
        data = []  # NOTE(review): unused local; candidate for removal
        for s in self.stats:
            # Insert stats
            s.finalize()
            message.insert(s.name, s.get_stats())
        # Send Message
        message.sent_to(self.client_address, proto='TCP')
        # Clear stats
        for s in self.stats:
            s.clear()
class MonitorServer(networking.UDPServer):
    ''' MonitorServer carries the commands from MonitorClient to Monitor.
    Main function of this is to start and stop the Monitor. If start action is taken,
    it creates a Monitor object with a new thread to process the packets.
    When stop action comes, the Monitor object is cleared.
    '''
    DEFAULT_PORT = 5010
    def __init__(self, port=0):
        '''
        :param port: UDP port to bind; 0 lets the OS pick an ephemeral port
        '''
        networking.UDPServer.__init__(self, ('0.0.0.0', port))
        self.monitor = None
        # Read back the actual port (relevant when an ephemeral port was used)
        self.port = self.server_address[1]
        print 'MonitorServer: listening on port: %d' % self.port
    def handle_message(self, message, address):
        '''It processes messages and takes actions of starting or stopping accordingly.
        For START, message.body must carry 'port', 'interface', 'stats names'
        and 'real time'.
        :param message: The message that is sent from MonitorClient to Monitor
        '''
        action = message.header.lower()
        if action == 'start' and self.monitor is None:
            print 'MonitorServer: starting the monitor...'
            # Statistics are sent back to the sender's host on the announced port
            client_address = (address[0], message.body['port'])
            self.monitor = Monitor(message.body['interface'], message.body['stats names'],
                                   client_address, message.body['real time'])
            self.monitor.start_daemon()
        elif action == 'stop' and self.monitor is not None:
            print 'MonitorServer: stopping the monitor...'
            self.monitor.stop()
            self.monitor = None
class MessageHandler(object):
    """Base class for consumers of Monitor statistics messages.

    Subclasses declare, via ``stats_names``, which statistics they want the
    MonitorClient to request from the MonitorServer, and override the
    callbacks below to process the incoming data.
    """

    def __init__(self, name, stats_names):
        """Store the handler name and the statistics it subscribes to.

        :param name: the name of this MessageHandler
        :param stats_names: names of the statistics this handler consumes
        """
        self.name = name
        self.stats_names = stats_names

    def handle_message(self, message):
        """Called for every incoming statistics message; no-op by default."""
        pass

    def disconnected(self):
        """Called when the MonitorClient disconnects; no-op by default."""
        pass
class MonitorClient(networking.TCPServer):
    '''
    This class communicates with the MonitorServer such that it triggers Monitor to send network statistics.
    Then, the MonitorClient distributes the incoming statistics to MessageHandlers.
    '''
    DEFAULT_PORT = 5011
    def __init__(self, monitor_server_address, interface, port=0, real_time=True, verbose=False):
        '''
        :param monitor_server_address: (host, port) of the MonitorServer
        :param interface: interface/capture name forwarded to the Monitor
        :param port: TCP port to bind; 0 lets the OS pick an ephemeral port
        :param real_time: forwarded to the Monitor for pcap replay pacing
        :param verbose: when True, print every received message body
        '''
        # Initialize the underlying TCPServer (binds on all interfaces)
        networking.TCPServer.__init__(self, ('0.0.0.0', port))
        self.port = self.server_address[1]
        print 'MonitorClient: listening on port: %d' % self.port
        # Monitor Options
        self.interface = interface
        self.real_time = real_time
        self.verbose = verbose
        self.monitor_server_address = monitor_server_address
        # Dispatcher
        self.message_handlers = []
    def register(self, message_handler):
        ''' Register a MessageHandler to the MonitorClient to send the messages.
        :param message_handler: a Message Handler
        '''
        self.message_handlers.append(message_handler)
    def handle_message(self, message, address):
        ''' HandleMessage sends the messages to the registered MessageHandlers.
        :param message: a Message
        '''
        for handler in self.message_handlers:
            handler.handle_message(message)
        if self.verbose:
            print message.body
    def run_forever(self):
        '''It runs until self.stop() is called or CTRL+C pressed.
        '''
        self.connect()
        self.listen()
        self.disconnect()
    def connect(self):
        '''
        Sends START to the MonitorServer with the union of all statistics
        names requested by the registered MessageHandlers.
        '''
        stats_names = set()
        for handler in self.message_handlers:
            for name in handler.stats_names:
                stats_names.add(name)
        print 'MonitorClient: connecting to %s:%s' % self.monitor_server_address
        params = {'interface': self.interface, 'port': self.port,
                  'real time': self.real_time, 'stats names': stats_names}
        networking.Message('START', params).sent_to(self.monitor_server_address, proto='UDP')
    def disconnect(self):
        '''Sends STOP to the MonitorServer and notifies the MessageHandlers.
        '''
        networking.Message('STOP').sent_to(self.monitor_server_address, proto='UDP')
        # Notify registered message handlers
        for handler in self.message_handlers:
            handler.disconnected()
        print 'MonitorClient:: disconnected'
class Logger(MessageHandler):
    ''' Dumps the data via pickle.
    Every incoming Stats message is pickled to <filename>; on disconnect the
    accumulated numeric matrix and its column headers are also written as
    plain-text files.
    '''
    STATS_COLLECTED = ['ResourceUsage', 'PacketHistogram', 'AsteriskLogHistogram']
    def __init__(self, filename=None):
        super(Logger, self).__init__('Logger', self.STATS_COLLECTED)
        if filename is None:
            # Timestamped default name, e.g. log_Mon_Jan__1_00:00:00_2018.pickle
            filename = 'log_'+time.asctime().replace(' ', '_')+'.pickle'
        self.filename = filename
        # Text mode 'w' works under Python 2; Python 3 pickling needs 'wb'
        if filename.endswith('.pickle'):
            self.file = open(self.filename,'w')
        else:
            self.file = open(self.filename+'.pickle','w')
        self.headers = []
        for stat in self.STATS_COLLECTED:
            self.headers += Stats.create_by_name(stat).HEADERS
        # Empty matrix with one column per header; rows appended per message
        self.data = np.array([]).reshape(0,len(self.headers))
    def handle_message(self, message):
        # NOTE(review): relies on message.body[key].values() ordering matching
        # self.headers -- confirm the Stats dicts are order-stable.
        tmp = []
        for key in self.STATS_COLLECTED:
            tmp = tmp + message.body[key].values()
        self.data = np.vstack((self.data, np.array(tmp)))
        pickle.dump(message,self.file)
    def disconnected(self):
        self.file.close()
        np.savetxt(self.filename+".headers.txt", self.headers, fmt="%s")
        # Transposed so each header's series is written as one row
        np.savetxt(self.filename+".data.txt", self.data.T)
``` |
[
{
"content": "```python\nfrom __future__ import with_statement\n\nfrom kombu.common import maybe_declare\nfrom kombu.pools import producers\n\nfrom queues import task_exchange\n\npriority_to_routing_key = {'high': 'hipri',\n 'mid': 'midpri',\n 'low': 'lopri'}\... | [
{
"content": "<|memory_start|>```python\nfrom __future__ import with_statement\n\nfrom kombu.common import maybe_declare\nfrom kombu.pools import producers\n\nfrom queues import task_exchange\n\npriority_to_routing_key = {'high': 'hipri',\n 'mid': 'midpri',\n ... | ```python
from __future__ import with_statement
from kombu.common import maybe_declare
from kombu.pools import producers
from queues import task_exchange
# Map task priority names to the routing keys declared for the
# corresponding queues (see the task_exchange bindings in queues.py).
priority_to_routing_key = {'high': 'hipri',
                           'mid': 'midpri',
                           'low': 'lopri'}
def send_as_task(connection, fun, args=(), kwargs=None, priority='mid'):
    """Publish *fun* as a task message on the task exchange.

    :param connection: a kombu Connection; a pooled producer is acquired for it
    :param fun: the callable the worker should execute
    :param args: positional arguments for *fun*
    :param kwargs: keyword arguments for *fun* (defaults to an empty dict)
    :param priority: 'high', 'mid' or 'low'; selects the routing key
    :raises KeyError: if *priority* is not a known priority name
    """
    # Sentinel instead of a mutable {} default (shared-default pitfall);
    # callers passing {} explicitly see identical behavior.
    kwargs = {} if kwargs is None else kwargs
    payload = {'fun': fun, 'args': args, 'kwargs': kwargs}
    routing_key = priority_to_routing_key[priority]

    with producers[connection].acquire(block=True) as producer:
        # Declare the exchange on first use of this channel
        maybe_declare(task_exchange, producer.channel)
        producer.publish(payload,
                         serializer='pickle',
                         compression='bzip2',
                         exchange=task_exchange,
                         routing_key=routing_key)
if __name__ == '__main__':
    from kombu import Connection
    from tasks import hello_task
    # Example: enqueue hello_task('Kombu') on the high-priority route
    connection = Connection('amqp://guest:guest@localhost:5672//')
    send_as_task(connection, fun=hello_task, args=('Kombu', ), kwargs={},
                 priority='high')
``` |
[
{
"content": "Here is a code file:\n```python\n#\n# Copyright (C) 2006, 2013 UNINETT AS\n#\n# This file is part of Network Administration Visualized (NAV).\n#\n# NAV is free software: you can redistribute it and/or modify it under\n# the terms of the GNU General Public License version 2 as published by\n# the F... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#\n# Copyright (C) 2006, 2013 UNINETT AS\n#\n# This file is part of Network Administration Visualized (NAV).\n#\n# NAV is free software: you can redistribute it and/or modify it under\n# the terms of the GNU General Public License version 2 as publi... | ```python
#
# Copyright (C) 2006, 2013 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""The smsd queue for the NAV database.
This smsd queue takes care of all communication between smsd and the NAV
database. Replacing the NAV database with some other queue/input should be
possible by implementing the interface seen in this class.
Generally, a phone number is a user and vice versa.
"""
import logging
import nav.db
import sys
# pylint: disable=W0703
class NAVDBQueue(object):
    """The smsd queue for the NAV database.

    A "user" is identified by a phone number. All methods open their own
    cursor via _connect() and either commit (mutations) or roll back
    (reads, to avoid long-lived transactions skewing now()).
    """
    def __init__(self):
        # Create logger
        self.logger = logging.getLogger("nav.smsd.queue")
        # Open DB connection early so we can detect errors early
        try:
            self._connect()
        except Exception, error:
            self.logger.exception("Queue failed to initialize. Exiting. (%s)",
                                  error)
            sys.exit(1)
    def cancel(self, minage='0'):
        """
        Mark all unsent messages as ignored.
        Input:
            minage Minimum age required for canceling message, default '0'.
            Format as PostgreSQL interval type, e.g. '1 day 12 hours'.
        Returns number of messages canceled.
        """
        dbconn = self._connect()
        db = dbconn.cursor()
        data = dict(minage=str(minage))
        # Test minage: let PostgreSQL validate the interval syntax before use
        if minage != '0':
            sql = "SELECT interval %(minage)s"
            try:
                db.execute(sql, data)
            except nav.db.driver.ProgrammingError:
                self.logger.warning("'autocancel' value (%s) is not valid. " +
                                    "Check config for errors.",
                                    minage)
                return 0
            except Exception:
                self.logger.exception("Unknown exception caught in " +
                                      "cancel(). Exiting.")
                sys.exit(1)
        # Ignore messages
        sql = """UPDATE smsq SET sent = 'I'
            WHERE sent = 'N' AND time < now() - interval %(minage)s"""
        db.execute(sql, data)
        dbconn.commit()
        return db.rowcount
    def getusers(self, sent='N'):
        """
        Get users which have messages with given sent status (normally unsent).
        Returns a sorted list with the phone numbers for all users with
        messages with given sent status.
        """
        users = []
        dbconn = self._connect()
        db = dbconn.cursor()
        data = dict(sent=sent)
        sql = """SELECT DISTINCT phone
                 FROM smsq
                 WHERE sent = %(sent)s
                 ORDER BY phone"""
        db.execute(sql, data)
        result = db.fetchall()
        # Rollback so we don't have old open transactions which foobars the
        # usage of now() in setsentstatus()
        dbconn.rollback()
        # Create a simple list without the tuples
        for row in result:
            users.append(row[0])
        return users
    def getusermsgs(self, user, sent='N'):
        """
        Get the user's messages which have given sent status (normally unsent).
        Returns a list of messages ordered with the most severe first. Each
        message is a tuple with the ID, text, and severity of the message.
        """
        dbconn = self._connect()
        db = dbconn.cursor()
        data = dict(phone=user, sent=sent)
        sql = """SELECT id, msg, severity
                 FROM smsq
                 WHERE phone = %(phone)s AND sent = %(sent)s
                 ORDER BY severity DESC, time ASC"""
        db.execute(sql, data)
        result = db.fetchall()
        # Rollback so we don't have old open transactions which foobars the
        # usage of now() in setsentstatus()
        dbconn.rollback()
        return result
    def getmsgs(self, sent='N'):
        """
        Get all messages with given sent status (normally unsent).
        Returns a list of dictionaries containing message details of SMS in
        queue with the specified status.
        """
        dbconn = self._connect()
        db = dbconn.cursor()
        data = dict(sent=sent)
        sql = """SELECT smsq.id as smsqid, name, msg, time
                 FROM smsq
                 JOIN account ON (account.id = smsq.accountid)
                 WHERE sent = %(sent)s ORDER BY time ASC"""
        db.execute(sql, data)
        result = []
        for (smsqid, name, msg, time) in db.fetchall():
            result.append(dict(id=smsqid, name=name, msg=msg,
                               time=time.strftime("%Y-%m-%d %H:%M")))
        # Rollback so we don't have old open transactions which foobars the
        # usage of now() in setsentstatus()
        dbconn.rollback()
        return result
    def setsentstatus(self, identifier, sent, smsid=0):
        """
        Set the sent status of a message given ID and status.
        Returns number of messages changed.
        """
        dbconn = self._connect()
        db = dbconn.cursor()
        # Only a final status ('Y' sent / 'I' ignored) stamps timesent
        if sent == 'Y' or sent == 'I':
            sql = """UPDATE smsq
                     SET sent = %(sent)s, smsid = %(smsid)s, timesent = now()
                     WHERE id = %(id)s"""
        else:
            sql = """UPDATE smsq
                     SET sent = %(sent)s, smsid = %(smsid)s
                     WHERE id = %(id)s"""
        data = dict(sent=sent, smsid=smsid, id=identifier)
        db.execute(sql, data)
        dbconn.commit()
        return db.rowcount
    def inserttestmsgs(self, uid, phone, msg):
        """
        Insert test messages into the SMS queue for debugging purposes.
        Returns an integer indicating how many rows have been inserted.
        """
        dbconn = self._connect()
        db = dbconn.cursor()
        data = dict(uid=uid, phone=phone, msg=msg)
        sql = """INSERT INTO smsq (accountid, time, phone, msg) VALUES (
            %(uid)s, now(), %(phone)s, %(msg)s)"""
        db.execute(sql, data)
        dbconn.commit()
        return db.rowcount
    @staticmethod
    @nav.db.retry_on_db_loss(delay=5)
    def _connect():
        # Decorated to retry automatically if the DB connection was lost
        return nav.db.getConnection('smsd', 'navprofile')
``` |
[
{
"content": "Here is the code content:\n```python\n#!/usr/bin/env python\n\n\"\"\"Service process base class and service management\"\"\"\n\n__author__ = 'Adam R. Smith, Michael Meisinger'\n\nimport json\nfrom types import ModuleType\nfrom zope.interface import implementedBy\n\nfrom pyon.core.exception import ... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n\"\"\"Service process base class and service management\"\"\"\n\n__author__ = 'Adam R. Smith, Michael Meisinger'\n\nimport json\nfrom types import ModuleType\nfrom zope.interface import implementedBy\n\nfrom pyon.core.e... | ```python
#!/usr/bin/env python
"""Service process base class and service management"""
__author__ = 'Adam R. Smith, Michael Meisinger'
import json
from types import ModuleType
from zope.interface import implementedBy
from pyon.core.exception import BadRequest, ServerError, NotFound
from pyon.util.log import log
from pyon.util.containers import named_any, itersubclasses
from pyon.util.context import LocalContextMixin
class BaseClients(object):
    """Container for the service clients used by a service.

    Implementations derive from this class; it is currently a pure
    placeholder and carries no behavior of its own.
    """
class BaseService(LocalContextMixin):
    """
    Base class providing a 'service'. Pure Python class. Not dependent on messaging.
    Such services can be executed by ION processes.

    Lifecycle: init() -> start() -> stop() -> quit(). Each public step calls
    a framework hook (_on_*) plus an on_* method meant for subclasses.
    """
    # The following are set one per implementation (class)
    name = None
    running = False
    dependencies = []
    process_type = "service"
    def __init__(self, *args, **kwargs):
        self.id = None
        self._proc_name = None
        self._proc_type = None
        self._proc_res_id = None
        self._proc_start_time = None
        self.errcause = None
        self.org_governance_name = None
        self.container = None
        self.CFG = None
        self._process = None  # reference to IonProcess, internal
        super(BaseService, self).__init__()
    def init(self):
        # Framework hook runs first, then the subclass hook's result is returned
        self._on_init()
        return self.on_init()
    def _on_init(self):
        """Framework hook to initialize"""
    def on_init(self):
        """
        Method to be overridden as necessary by implementing service classes to perform
        initialization actions prior to service start. Configuration parameters are
        accessible via the self.CFG dict.
        """
    def start(self):
        self._on_start()
        return self.on_start()
    def _on_start(self):
        """Framework hook to start"""
        self.running = True
    def on_start(self):
        """
        Method called at service startup.
        """
    def stop(self):
        # Subclass hook runs before the framework hook on the way down
        res = self.on_stop()
        self._on_stop()
        return res
    def _on_stop(self):
        """Framework hook to stop"""
        self.running = False
    def on_stop(self):
        """
        Method called at service stop. (May not be called if service is terminated immediately).
        """
    def quit(self):
        res = None
        try:
            res = self.on_quit()
        except Exception:
            # on_quit failures are logged but never prevent shutdown
            log.exception("Error while service %s, id: %s quitting" % (self.name, self.id))
        self._on_quit()
        return res
    def _on_quit(self):
        """Framework hook to quit"""
        self.running = False
    def on_quit(self):
        """
        Method called just before service termination.
        """
    def assert_condition(self, condition, errorstr):
        # Raise BadRequest (not AssertionError) so callers get an ION exception
        if not condition:
            raise BadRequest(errorstr)
    def _validate_resource_id(self, arg_name, resource_id, res_type=None, optional=False, allow_subtype=True,
                              allow_deleted=False):
        """
        Check that the given argument is a resource id (string), by retrieving the resource from the
        resource registry. Additionally, checks type and returns the result object.
        Supports optional argument and subtypes. res_type can be a list of (sub)types.
        """
        if optional and not resource_id:
            return
        if not resource_id and not optional:
            raise BadRequest("Argument '%s': missing" % arg_name)
        resource_obj = self.clients.resource_registry.read(resource_id)
        if resource_obj.lcstate == "DELETED" and not allow_deleted:
            raise NotFound("Object with id %s does not exist." % resource_id)
        if res_type:
            # Normalize a single type to a list so both forms are accepted
            type_list = res_type
            if not hasattr(res_type, "__iter__"):
                type_list = [res_type]
            from pyon.core.registry import issubtype
            if allow_subtype and not any(map(lambda check_type: issubtype(resource_obj.type_, check_type), type_list)):
                raise BadRequest("Argument '%s': existing resource is not a '%s' -- SPOOFING ALERT" % (arg_name, res_type))
            elif not allow_subtype and not any(map(lambda check_type: resource_obj.type_ == check_type, type_list)):
                raise BadRequest("Argument '%s': existing resource is not a '%s' -- SPOOFING ALERT" % (arg_name, res_type))
        return resource_obj
    def _validate_resource_obj(self, arg_name, resource_obj, res_type=None, optional=False, checks=""):
        """
        Check that the given argument (object) exists and is a resource object of given type.
        Can be None if optional==True.
        Optional checks in comma separated string:
        - id: resource referenced by ID is compatible and returns it.
        - noid: object contains no id
        - name: object has non empty name
        - unique: name is not used yet in system for given res_type (must be set)
        """
        # basestring is Python 2 only; a comma-separated string or a list is accepted
        checks = checks.split(",") if isinstance(checks, basestring) else checks
        if optional and resource_obj is None:
            return
        if not resource_obj:
            raise BadRequest("Argument '%s': missing" % arg_name)
        from interface.objects import Resource
        if not isinstance(resource_obj, Resource):
            raise BadRequest("Argument '%s': not a resource object" % arg_name)
        if "noid" in checks and "_id" in resource_obj:
            raise BadRequest("Argument '%s': resource object has an id" % arg_name)
        if ("name" in checks or "unique" in checks) and not resource_obj.name:
            raise BadRequest("Argument '%s': resource has invalid name" % arg_name)
        if "unique" in checks:
            if not res_type:
                raise BadRequest("Must provide resource type")
            res_list, _ = self.clients.resource_registry.find_resources(restype=res_type, name=resource_obj.name)
            if res_list:
                raise BadRequest("Argument '%s': resource name already exists" % arg_name)
        if res_type and resource_obj.type_ != res_type:
            raise BadRequest("Argument '%s': resource object type is not a '%s' -- SPOOFING ALERT" % (arg_name, res_type))
        if "id" in checks:
            if "_id" not in resource_obj:
                raise BadRequest("Argument '%s': resource object has no id" % arg_name)
            old_resource_obj = self.clients.resource_registry.read(resource_obj._id)
            if res_type and old_resource_obj.type_ != res_type:
                raise BadRequest("Argument '%s': existing resource is not a '%s' -- SPOOFING ALERT" % (arg_name, res_type))
        # NOTE(review): if "id" is not in checks, old_resource_obj is unbound
        # here and this return raises NameError -- confirm intended placement.
        return old_resource_obj
    def _validate_arg_obj(self, arg_name, arg_obj, obj_type=None, optional=False):
        """
        Check that the given argument exists and is an object of given type
        """
        if optional and arg_obj is None:
            return
        if not arg_obj:
            raise BadRequest("Argument '%s': missing" % arg_name)
        from interface.objects import IonObjectBase
        if not isinstance(arg_obj, IonObjectBase):
            raise BadRequest("Argument '%s': not an object" % arg_name)
        if obj_type and arg_obj.type_ != obj_type:
            raise BadRequest("Argument '%s': object type is not a '%s'" % (arg_name, obj_type))
    def __str__(self):
        proc_name = 'Unknown proc_name' if self._proc_name is None else self._proc_name
        proc_type = 'Unknown proc_type' if self._proc_type is None else self._proc_type
        # NOTE(review): str.join requires self.id to be a string; a None id
        # would raise TypeError here -- confirm id is always set before str().
        return "".join((self.__class__.__name__, "(",
                        "name=", proc_name,
                        ",id=", self.id,
                        ",type=", proc_type,
                        ")"))
    def add_endpoint(self, endpoint):
        """
        Adds a managed listening endpoint to this service/process.
        The service/process must be running inside of an IonProcessThread, or this
        method will raise an error.
        A managed listening endpoint will report failures up to the process, then to
        the container's process manager.
        """
        if self._process is None:
            raise ServerError("No attached IonProcessThread")
        self._process.add_endpoint(endpoint)
    def remove_endpoint(self, endpoint):
        """
        Removes an endpoint from being managed by this service/process.
        The service/process must be running inside of an IonProcessThread, or this
        method will raise an error. It will also raise an error if the endpoint is
        not currently managed.
        Errors raised in the endpoint will no longer be reported to the process or
        process manager.
        """
        if self._process is None:
            raise ServerError("No attached IonProcessThread")
        self._process.remove_endpoint(endpoint)
# -----------------------------------------------------------------------------------------------
# Service management infrastructure
class IonServiceDefinition(object):
    """Walkable container describing one ION service's metadata.

    Holds the service name, its declared dependencies and version, plus
    slots for the interface, base/implementation classes, clients and
    schema that are filled in by the service registry.
    """
    def __init__(self, name, dependencies=None, version=''):
        self.name = name
        # Defensive copy; also maps None to an empty list
        self.dependencies = list(dependencies or [])
        self.version = version
        self.operations = []
        self.interface = None       # service (Zope) interface
        self.base = None            # abstract base class
        self.impl = []              # implementation classes
        self.client = None          # process client class
        self.schema = None          # dict schema
        self.simple_client = None   # non-process client class

    def __str__(self):
        return "IonServiceDefinition(name=%s):%s" % (self.name, self.__dict__)

    # repr intentionally mirrors str
    __repr__ = __str__
class IonServiceOperation(object):
    """Metadata record for a single operation of an ION service."""

    def __init__(self, name):
        self.name = name
        self.docstring = ''             # operation documentation text
        self.in_object_type = None      # request message object type
        self.out_object_type = None     # response message object type
        self.throws = []                # declared exception types

    def __str__(self):
        return "IonServiceOperation(name=%s):%s" % (self.name, self.__dict__)

    # repr intentionally mirrors str
    __repr__ = __str__
class IonServiceRegistry(object):
def __init__(self):
self.services = {}
self.services_by_name = {}
self.classes_loaded = False
self.operations = None
def add_servicedef_entry(self, name, key, value, append=False):
if not name:
#log.warning("No name for key=%s, value=%s" % (key, value))
return
if not name in self.services:
svc_def = IonServiceDefinition(name)
self.services[name] = svc_def
else:
svc_def = self.services[name]
oldvalue = getattr(svc_def, key, None)
if oldvalue is not None:
if append:
assert type(oldvalue) is list, "Cannot append to non-list: %s" % oldvalue
oldvalue.append(value)
else:
log.warning("Service %s, key=%s exists. Old=%s, new=%s" % (name, key, getattr(svc_def, key), value))
if not append:
setattr(svc_def, key, value)
@classmethod
def load_service_mods(cls, path, package=""):
if isinstance(path, ModuleType):
for p in path.__path__:
cls.load_service_mods(p, path.__name__)
return
import pkgutil
for mod_imp, mod_name, is_pkg in pkgutil.iter_modules([path]):
if is_pkg:
cls.load_service_mods(path + "/" + mod_name, package + "." + mod_name)
else:
mod_qual = "%s.%s" % (package, mod_name)
try:
named_any(mod_qual)
except Exception as ex:
log.warning("Import module '%s' failed: %s" % (mod_qual, ex))
def build_service_map(self):
"""
Adds all known service definitions to service registry.
@todo: May be a bit fragile due to using BaseService.__subclasses__
"""
for cls in BaseService.__subclasses__():
assert hasattr(cls, 'name'), 'Service class must define name value. Service class in error: %s' % cls
if cls.name:
self.services_by_name[cls.name] = cls
self.add_servicedef_entry(cls.name, "base", cls)
try:
self.add_servicedef_entry(cls.name, "schema", json.loads(cls.SCHEMA_JSON))
except Exception as ex:
log.exception("Cannot parse service schema " + cls.name)
interfaces = list(implementedBy(cls))
if interfaces:
self.add_servicedef_entry(cls.name, "interface", interfaces[0])
if cls.__name__.startswith("Base"):
try:
client = "%s.%sProcessClient" % (cls.__module__, cls.__name__[4:])
self.add_servicedef_entry(cls.name, "client", named_any(client))
sclient = "%s.%sClient" % (cls.__module__, cls.__name__[4:])
self.add_servicedef_entry(cls.name, "simple_client", named_any(sclient))
except Exception, ex:
log.warning("Cannot find client for service %s" % (cls.name))
def discover_service_classes(self):
"""
Walk implementation directories and find service implementation classes.
@todo Only works for ion packages and submodules
"""
IonServiceRegistry.load_service_mods("ion")
sclasses = [s for s in itersubclasses(BaseService) if not s.__subclasses__()]
for scls in sclasses:
self.add_servicedef_entry(scls.name, "impl", scls, append=True)
self.classes_loaded = True
def get_service_base(self, name):
"""
Returns the service base class with interface for the given service name or None.
"""
if name in self.services:
return getattr(self.services[name], 'base', None)
else:
return None
def get_service_by_name(self, name):
"""
Returns the service definition for the given service name or None.
"""
if name in self.services:
return self.services[name]
else:
return None
def is_service_available(self, service_name, local_rr_only=False):
try:
service_resource = None
from pyon.core.bootstrap import container_instance
from interface.objects import ServiceStateEnum
# Use container direct RR connection if available, otherwise use messaging to the RR service
if hasattr(container_instance, 'has_capability') and container_instance.has_capability('RESOURCE_REGISTRY'):
service_resource, _ = container_instance.resource_registry.find_resources(restype='Service', name=service_name)
elif not local_rr_only:
from interface.services.core.iresource_registry_service import ResourceRegistryServiceClient
rr_client = ResourceRegistryServiceClient(container_instance.node)
service_resource, _ = rr_client.find_resources(restype='Service', name=service_name)
else:
log.warn("is_service_available(%s) - No RR connection" % service_name)
# The service is available only of there is a single RR object for it and it is in one of these states:
if service_resource and len(service_resource) > 1:
log.warn("is_service_available(%s) - Found multiple service instances: %s", service_name, service_resource)
# MM 2013-08-17: Added PENDING, because this means service will be there shortly
if service_resource and service_resource[0].state in (ServiceStateEnum.READY, ServiceStateEnum.STEADY, ServiceStateEnum.PENDING):
return True
elif service_resource:
log.warn("is_service_available(%s) - Service resource in invalid state", service_resource)
return False
except Exception as ex:
return False
``` |
[
{
"content": "Repeat the code precisely:\n```python\n\"\"\"\nTests:\n\n1. Galaxies positions are all within the light-cone geometry\n2. The number of Galaxies in Catalogue 0 matches the expected number (optional)\n3. The galaxy ids are unique across all light-cones (optional)\n\"\"\"\nimport logging\n\nfrom tao... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n\"\"\"\nTests:\n\n1. Galaxies positions are all within the light-cone geometry\n2. The number of Galaxies in Catalogue 0 matches the expected number (optional)\n3. The galaxy ids are unique across all light-cones (optional)\n\"\"\"\nimport log... | ```python
"""
Tests:
1. Galaxies positions are all within the light-cone geometry
2. The number of Galaxies in Catalogue 0 matches the expected number (optional)
3. The galaxy ids are unique across all light-cones (optional)
"""
import logging
from tao_validate import ValidateJob
logger = logging.getLogger('detest.'+__name__)
class Validator(ValidateJob):
def __init__(self):
self.doc = __doc__
super(Validator, self).__init__()
def validate(self, args, job_params):
super(Validator, self).validate(args, job_params)
self.catalogues = []
for i in range(self.job_params.NUMBER_OF_CONES):
self.catalogues.append(self.load_csv(i,
usecols=['Galaxy_ID', 'Right_Ascension', 'Declination',
'Redshift_Cosmological']))
self.check_geometry(self.catalogues[i])
self.check_galaxy_count(self.catalogues[0])
if getattr(self.job_params, 'CHECK_UNIQUE', False):
logger.info("Checking Galaxy IDs are unique")
for i in range(self.job_params.NUMBER_OF_CONES-1):
for j in range(i+1, self.job_params.NUMBER_OF_CONES):
logger.debug("Unique Galaxies between catalogues {0} and {1}".format(
i, j))
self.unique_galaxies(self.catalogues[i], self.catalogues[j])
logger.info("Finished Cone Basic Checks.")
return
def check_galaxy_count(self, cat):
"Check the number of galaxies in the supplied catalogue"
if getattr(self.job_params, 'GALAXY_COUNT', None) is None:
logger.info("Skipping galaxy check count")
return
logger.info("Checking galaxy count")
self.assert_true(len(cat) == self.job_params.GALAXY_COUNT,
"Galaxy counts don't match: got {0}, expected {1}".format(
len(cat), self.job_params.GALAXY_COUNT))
return
def unique_galaxies(self, cat1, cat2):
"Check that galaxies only appear in 1 catalogue"
gid1 = set(cat1['Galaxy_ID'].values)
gid2 = set(cat2['Galaxy_ID'].values)
common = gid1 & gid2
self.assert_true(len(common) == 0,
"Galaxy IDs are not unique: {0} in common".format(
len(common)))
return
def check_geometry(self, cat):
"Check that RA, Dec and Redshift are withing the catalogue geometry"
stats = cat['Right_Ascension'].describe()
self.assert_true(stats['max'] <= self.job_params.RA,
"Expected max RA of {0}, got {1}".format(
self.job_params.RA, stats['max']))
self.assert_true(stats['min'] >= 0.0,
"Negative RA: {0}".format(
stats['min']))
stats = cat['Declination'].describe()
self.assert_true(stats['max'] <= self.job_params.DEC,
"Expected max Dec of {0}, got {1}".format(
self.job_params.DEC, stats['max']))
self.assert_true(stats['min'] >= 0.0,
"Negative Dec: {0}".format(
stats['min']))
stats = cat['Redshift_Cosmological'].describe()
self.assert_true(stats['max'] <= self.job_params.REDSHIFT_MAX,
"Expected max Redshift of {0}, got {1}".format(
self.job_params.REDSHIFT_MAX, stats['max']))
self.assert_true(stats['min'] >= self.job_params.REDSHIFT_MIN,
"Expected min Redshift of {0}, got {1}".format(
self.job_params.REDSHIFT_MIN, stats['min']))
return
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Un... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE... | ```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker import objects
from tacker.objects import base
from tacker.objects import fields
@base.TackerObjectRegistry.register
class Grant(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.StringField(nullable=False),
'vnf_instance_id': fields.StringField(nullable=False),
'vnf_lcm_op_occ_id': fields.StringField(nullable=False),
'vim_connections': fields.ListOfObjectsField(
'VimConnectionInfo', nullable=True, default=[]),
'zones': fields.ListOfObjectsField(
'ZoneInfo', nullable=True, default=[]),
'add_resources': fields.ListOfObjectsField(
'GrantInfo', nullable=True, default=[]),
'remove_resources': fields.ListOfObjectsField(
'GrantInfo', nullable=True, default=[]),
'update_resources': fields.ListOfObjectsField(
'GrantInfo', nullable=True, default=[]),
'vim_assets': fields.ObjectField(
'VimAssets', nullable=True),
'ext_virtual_links': fields.ListOfObjectsField(
'ExtVirtualLinkData', nullable=True, default=[]),
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_grant = super(
Grant, cls).obj_from_primitive(primitive, context)
else:
if 'vim_connections' in primitive.keys():
obj_data = [objects.VimConnectionInfo._from_dict(
vim_conn) for vim_conn in primitive.get(
'vim_connections', [])]
primitive.update({'vim_connections': obj_data})
if 'zones' in primitive.keys():
obj_data = [ZoneInfo._from_dict(
zone) for zone in primitive.get(
'zones', [])]
primitive.update({'zones': obj_data})
if 'add_resources' in primitive.keys():
obj_data = [GrantInfo._from_dict(
add_rsc) for add_rsc in primitive.get(
'add_resources', [])]
primitive.update({'add_resources': obj_data})
if 'remove_resources' in primitive.keys():
obj_data = [GrantInfo._from_dict(
remove_rsc) for remove_rsc in primitive.get(
'remove_resources', [])]
primitive.update({'remove_resources': obj_data})
if 'update_resources' in primitive.keys():
obj_data = [GrantInfo._from_dict(
update_rsc) for update_rsc in primitive.get(
'update_resources', [])]
primitive.update({'update_resources': obj_data})
if 'vim_assets' in primitive.keys():
obj_data = VimAssets.obj_from_primitive(
primitive.get('vim_assets'), context)
primitive.update({'vim_assets': obj_data})
if 'ext_virtual_links' in primitive.keys():
obj_data = [objects.ExtVirtualLinkData.obj_from_primitive(
ext_vir_link, context) for ext_vir_link in primitive.get(
'ext_virtual_links', [])]
primitive.update({'ext_virtual_links': obj_data})
obj_grant = Grant._from_dict(primitive)
return obj_grant
@classmethod
def _from_dict(cls, data_dict):
id = data_dict.get('id')
vnf_instance_id = data_dict.get('vnf_instance_id')
vnf_lcm_op_occ_id = data_dict.get('vnf_lcm_op_occ_id')
vim_connections = data_dict.get('vim_connections', [])
zones = data_dict.get('zones', [])
add_resources = data_dict.get('add_resources', [])
remove_resources = data_dict.get('remove_resources', [])
update_resources = data_dict.get('update_resources', [])
vim_assets = data_dict.get('vim_assets')
ext_virtual_links = data_dict.get('ext_virtual_links', [])
obj = cls(
id=id,
vnf_instance_id=vnf_instance_id,
vnf_lcm_op_occ_id=vnf_lcm_op_occ_id,
vim_connections=vim_connections,
zones=zones,
add_resources=add_resources,
remove_resources=remove_resources,
update_resources=update_resources,
vim_assets=vim_assets,
ext_virtual_links=ext_virtual_links)
return obj
@base.TackerObjectRegistry.register
class ZoneInfo(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.StringField(nullable=False),
'zone_id': fields.StringField(nullable=False),
'vim_connection_id': fields.StringField(nullable=True)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_zone_info = super(
ZoneInfo, cls).obj_from_primitive(primitive, context)
else:
obj_zone_info = ZoneInfo._from_dict(primitive)
return obj_zone_info
@classmethod
def _from_dict(cls, data_dict):
id = data_dict.get('id')
zone_id = data_dict.get('zone_id')
vim_connection_id = data_dict.get('vim_connection_id')
obj = cls(
id=id,
zone_id=zone_id,
vim_connection_id=vim_connection_id)
return obj
@base.TackerObjectRegistry.register
class GrantInfo(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'resource_definition_id': fields.StringField(nullable=False),
'vim_connection_id': fields.StringField(nullable=True),
'zone_id': fields.StringField(nullable=True)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_grant_info = super(
GrantInfo, cls).obj_from_primitive(primitive, context)
else:
obj_grant_info = GrantInfo._from_dict(primitive)
return obj_grant_info
@classmethod
def _from_dict(cls, data_dict):
resource_definition_id = data_dict.get('resource_definition_id')
vim_connection_id = data_dict.get('vim_connection_id')
zone_id = data_dict.get('zone_id')
obj = cls(
resource_definition_id=resource_definition_id,
vim_connection_id=vim_connection_id,
zone_id=zone_id)
return obj
@base.TackerObjectRegistry.register
class VimAssets(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'compute_resource_flavours': fields.ListOfObjectsField(
'VimComputeResourceFlavour', nullable=True, default=[]),
'software_images': fields.ListOfObjectsField(
'VimSoftwareImage', nullable=True, default=[])
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_vim_assets = super(
VimAssets, cls).obj_from_primitive(primitive, context)
else:
if 'compute_resource_flavours' in primitive.keys():
obj_data = [VimComputeResourceFlavour._from_dict(
flavour) for flavour in primitive.get(
'compute_resource_flavours', [])]
primitive.update({'compute_resource_flavours': obj_data})
if 'software_images' in primitive.keys():
obj_data = [VimSoftwareImage._from_dict(
img) for img in primitive.get(
'software_images', [])]
primitive.update({'software_images': obj_data})
obj_vim_assets = VimAssets._from_dict(primitive)
return obj_vim_assets
@classmethod
def _from_dict(cls, data_dict):
compute_resource_flavours = data_dict.get(
'compute_resource_flavours', [])
software_images = data_dict.get('software_images', [])
obj = cls(
compute_resource_flavours=compute_resource_flavours,
software_images=software_images)
return obj
@base.TackerObjectRegistry.register
class VimComputeResourceFlavour(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'vim_connection_id': fields.StringField(nullable=True),
'vnfd_virtual_compute_desc_id': fields.StringField(nullable=False),
'vim_flavour_id': fields.StringField(nullable=False)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_flavour = super(
VimComputeResourceFlavour,
cls).obj_from_primitive(
primitive,
context)
else:
obj_flavour = VimComputeResourceFlavour._from_dict(primitive)
return obj_flavour
@classmethod
def _from_dict(cls, data_dict):
vim_connection_id = data_dict.get('vim_connection_id')
vnfd_virtual_compute_desc_id = data_dict.get(
'vnfd_virtual_compute_desc_id')
vim_flavour_id = data_dict.get('vim_flavour_id')
obj = cls(
vim_connection_id=vim_connection_id,
vnfd_virtual_compute_desc_id=vnfd_virtual_compute_desc_id,
vim_flavour_id=vim_flavour_id)
return obj
@base.TackerObjectRegistry.register
class VimSoftwareImage(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'vim_connection_id': fields.StringField(nullable=True),
'vnfd_software_image_id': fields.StringField(nullable=False),
'vim_software_image_id': fields.StringField(nullable=False)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_img = super(
VimSoftwareImage, cls).obj_from_primitive(primitive, context)
else:
obj_img = VimSoftwareImage._from_dict(primitive)
return obj_img
@classmethod
def _from_dict(cls, data_dict):
vim_connection_id = data_dict.get('vim_connection_id')
vnfd_software_image_id = data_dict.get('vnfd_software_image_id')
vim_software_image_id = data_dict.get('vim_software_image_id')
obj = cls(
vim_connection_id=vim_connection_id,
vnfd_software_image_id=vnfd_software_image_id,
vim_software_image_id=vim_software_image_id)
return obj
``` |
[
{
"content": "```python\n#\n# ParameterWeaver: a code generator to handle command line parameters\n# and configuration files for C/C++/Fortran/R/Octave\n# Copyright (C) 2013 Geert Jan Bex <geertjan.bex@uhasselt.be>\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms... | [
{
"content": "<|memory_start|>```python\n#\n# ParameterWeaver: a code generator to handle command line parameters\n# and configuration files for C/C++/Fortran/R/Octave\n# Copyright (C) 2013 Geert Jan Bex <geertjan.bex@uhasselt.be>\n# \n# This program is free software: you can redistribute it and/or modify\n# it... | ```python
#
# ParameterWeaver: a code generator to handle command line parameters
# and configuration files for C/C++/Fortran/R/Octave
# Copyright (C) 2013 Geert Jan Bex <geertjan.bex@uhasselt.be>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from vsc.parameter_weaver.base_formatter import BaseFormatter
from vsc.parameter_weaver.temporaryfile import TemporaryFile
from vsc.parameter_weaver.r.validator import String, Logical
from vsc.util import Indenter
class Formatter(BaseFormatter):
def __init__(self, parameters):
super(Formatter, self).__init__(parameters)
self._def_ext = '.r'
def initialization(self):
indenter = Indenter(self.indent_string)
for param in self._parameters:
name = param.name
default = param.default
if isinstance(param.type, Logical):
indenter.add('{0} <- FALSE'.format(name))
elif isinstance(param.type, String):
indenter.add('{0} <- "{1}";'.format(name, default))
else:
indenter.add('{0} <- {1};'.format(name, default))
return indenter.text()
def parser(self):
indenter = Indenter(self.indent_string)
indenter.add('cl_params = commandArgs(trailingOnly = TRUE)')
indenter.add('cl_last = 1')
indenter.add('while (cl_last <= length(cl_params)) {')
indenter.incr()
for param in self._parameters:
name = param.name
indenter.add('if (cl_params[cl_last] == "-{0}") {{'.format(name))
indenter.incr()
if isinstance(param.type, Logical):
indenter.add('{0} <- TRUE'.format(name))
else:
indenter.add('cl_last <- cl_last + 1')
indenter.add('if (cl_last > length(cl_params)) {').incr()
indenter.add('write("### error: option \'-{0}\' expects a value", stderr())'.format(name))
indenter.add('quit()')
indenter.decr().add('}')
indenter.add('argv_str <- cl_params[cl_last]')
val_func = param.type.validation_function('argv_str')
indenter.add('if (!{0}) {{'.format(val_func)).incr()
indenter.add('write("### error: invalid value for option \'-{0}\' of type {1}", stderr())'.format(name, param.type.name))
indenter.add('quit()')
indenter.decr().add('}')
indenter.add(param.type.input_conversion(name))
indenter.add('cl_last <- cl_last + 1')
indenter.add('next')
indenter.decr().add('}')
indenter.add('break')
indenter.decr().add('}')
indenter.add('if (cl_last > length(cl_params)) {').incr()
indenter.add('cl_params <- c()')
indenter.decr().add('} else {').incr()
indenter.add('cl_params <- cl_params[cl_last:length(cl_params)]')
indenter.decr().add('}')
return indenter.text()
def dumper(self):
indenter = Indenter(self.indent_string)
indenter.add('dump_cl <- function(fileConn, prefix) {').incr()
for param in self._parameters:
name = param.name
format_string = param.type.format_string
if isinstance(param.type, String):
indenter.add('write(sprintf("%s{0} = \'{1}\'", prefix, {0}), fileConn)'.format(name, format_string))
else:
indenter.add('write(sprintf("%s{0} = {1}", prefix, {0}), fileConn)'.format(name, format_string))
indenter.decr().add('}')
return indenter.text()
def definition_file(self, file_name):
'''Returns contents of definition file'''
indenter = Indenter(self.indent_string)
indenter.add(self.initialization())
indenter.add()
indenter.add(self.parser())
indenter.add()
indenter.add(self.dumper())
return indenter.text()
def get_artifacts(self, base_name):
return [
TemporaryFile(base_name, '.r', self.definition_file(base_name))]
``` |
[
{
"content": "Here is the script:\n```python\n# Copyright 2020 Makani Technologies LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n# Copyright 2020 Makani Technologies LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apac... | ```python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for network.yaml."""
import collections
import os
import sys
import gflags
import makani
from makani.lib.python import string_util
import yaml
class NetworkConfigException(Exception):
pass
def _GetPortNumber(port, port_names):
if isinstance(port, int):
return port
else:
for p, name in port_names.iteritems():
if name == port:
return p
raise NetworkConfigException('Invalid port specification: {}.'.format(port))
def _ConvertPortNumbersInList(names_or_numbers, port_names):
return [_GetPortNumber(n, port_names) for n in names_or_numbers]
def _RangeParse(node_select):
nodes = set()
for item in node_select:
if isinstance(item, list):
nodes.update(range(*item))
else:
nodes.add(item)
return nodes
def _ValidateMessageTypes(message_types, supplied_types):
for supplied in supplied_types:
for m in message_types:
if supplied == m['name']:
break
else:
raise NetworkConfigException('Message type %s is invalid.' % supplied)
# Flatten lists of lists, and make raw elements into lists of one item; yaml
# doesn't support merging lists automatically.
def _FlattenList(l):
"""Flattens lists of lists into plain lists, recursively.
For example, [[4, 5, 6], [7, 8], []] will become [4, 5, 6, 7, 8].
Non-list elements will get wrapped in a list, so 'foo' becomes ['foo'].
None becomes [].
Args:
l: a list, or not.
Returns:
A flattened list.
"""
ret = []
if l is None:
pass
elif not isinstance(l, list):
ret = [l]
else:
for item in l:
ret.extend(_FlattenList(item))
return ret
def _FlattenNodeList(l):
"""Replaces a list of list of nodes with a list of node names."""
return [n['name'] for n in _FlattenList(l)]
class AioNode(object):
"""Wrapper class for AIO node definition in network.yaml."""
def __init__(self, config, kind, instance):
self._config = config
self._kind = kind
self._instance = instance
self._enum_value = config._aio_enum_value # pylint: disable=protected-access
config._aio_enum_value += 1 # pylint: disable=protected-access
for i, node in enumerate(kind['instances']):
if node['name'] == instance['name']:
self._index = i
break
else:
raise NetworkConfigException('Node name not found under this label name.')
if not string_util.IsSnakeCase(self.snake_name):
raise NetworkConfigException('AioNode name is not snake case: %s'
% self.snake_name)
def __hash__(self):
return self.snake_name.__hash__()
def __eq__(self, other):
return self.snake_name == other.snake_name
def __cmp__(self, other):
return cmp(self.enum_value, other.enum_value)
@property
def label_name(self):
return self._kind['label_name']
@property
def snake_name(self):
return self._instance['name']
@property
def camel_name(self):
return string_util.SnakeToCamel(self.snake_name)
@property
def enum_name(self):
return 'kAioNode' + self.camel_name
@property
def application_path(self):
return self._kind['application']
@property
def bootloader_path(self):
return self._kind.get('bootloader',
self._config._yaml['default_bootloader']) # pylint: disable=protected-access
@property
def bootloader_application_path(self):
return self._kind.get(
'bootloader_application',
self._config._yaml['default_bootloader_application']) # pylint: disable=protected-access
@property
def label_value(self):
return self._index
@property
def ip_octet(self):
return self._instance['ip']
@property
def ip(self):
return '192.168.1.%d' % self.ip_octet
@property
def enum_value(self):
return self._enum_value
@property
def tms570_node(self):
return self.snake_name in self._config._yaml['tms570s'] # pylint: disable=protected-access
@property
def q7_node(self):
return self.snake_name in self._config._yaml['q7s'] # pylint: disable=protected-access
class MessageRoute(object):
"""Wrapper class for message route definitions in network.yaml."""
def __init__(self, config, route):
self._config = config
self._route = route
required_fields = ['senders', 'receivers']
if not all(x in route for x in required_fields):
raise NetworkConfigException(
'MessageRoute missing one or more required fields: %s' %
', '.join(required_fields))
@property
def senders(self):
return frozenset(self._config.GetAioNode(name)
for name in self._route['senders'])
@property
def receivers(self):
return frozenset(self._config.GetAioNode(name)
for name in self._route['receivers'])
class MessageType(object):
"""Wrapper class for message type definition in network.yaml."""
def __init__(self, config, message, type_name, enum_value):
self._config = config
self._message = message
self._type_name = type_name.capitalize()
self._enum_value = enum_value
required_fields = ['name', 'freq', 'routes']
if not all(x in self._message for x in required_fields):
raise NetworkConfigException(
'Message %s missing one or more required fields: %s' %
(self._message.get('name', ''), ', '.join(required_fields)))
if not string_util.IsCamelCase(self.name):
raise NetworkConfigException('MessageType name is not camel case: %s'
% self.camel_name)
self._routes = [MessageRoute(config, r) for r in message['routes']]
def __hash__(self):
return self.name.__hash__()
def __eq__(self, other):
return self.name == other.name
def __cmp__(self, other):
type_name_compare = cmp(self.type_name, other.type_name)
if type_name_compare == 0:
return cmp(self.enum_value, other.enum_value)
return type_name_compare
@property
def name(self):
return self._message['name']
@property
def snake_name(self):
return string_util.CamelToSnake(self.name)
@property
def type_name(self):
return self._type_name
@property
def enum_prefix(self):
# TODO: Rename MessageType to AioMessageType.
if self.type_name == 'Aio':
return 'MessageType'
return '%sMessageType' % self.type_name
@property
def enum_name(self):
return 'k%s%s' % (self.enum_prefix, self.name)
@property
def enum_value(self):
return self._enum_value
@property
def ip(self):
if self.type_name == 'Aio':
return '239.0.0.%d' % self.enum_value
elif self.type_name == 'Eop':
return '239.0.2.%d' % self.enum_value
elif self.type_name == 'Winch':
return '239.0.1.%d' % self.enum_value
raise ValueError('Unknown message type name: ' + self.type_name)
@property
def frequency_hz(self):
return self._message['freq']
@property
def all_senders(self):
return frozenset(name for route in self._routes for name in route.senders)
@property
def all_receivers(self):
return frozenset(name for route in self._routes for name in route.receivers)
@property
def routes(self):
return self._routes
@property
def inhibit_routing(self):
return self._message.get('inhibit_routing', False)
@property
def inhibit_cvt(self):
return self._message.get('inhibit_cvt', False)
@property
def aio_message(self):
return self.type_name == 'Aio'
@property
def eop_message(self):
return self.type_name == 'Eop'
@property
def winch_message(self):
return self.type_name == 'Winch'
class NetworkConfig(object):
"""Wrapper for the network.yaml file.
Provides an interface to access information about AioNodes and MessageTypes.
"""
def _PreprocessMessageTypes(self, key, y):
"""Preprocesses a message list.
Args:
key: YAML file key for message list (e.g., aio_messages).
y: Loaded YAML file.
Returns:
The processed message list.
"""
if key in y:
for m in y[key]:
if 'routes' in m:
for r in m['routes']:
r['receivers'] = _FlattenNodeList(r['receivers'])
r['senders'] = _FlattenNodeList(r['senders'])
else:
m['routes'] = []
if 'receivers' in m and 'senders' in m:
m['routes'].append({
'receivers': _FlattenNodeList(m['receivers']),
'senders': _FlattenNodeList(m['senders'])})
m.pop('receivers')
m.pop('senders')
return y[key]
return []
def _PreprocessYamlFile(self, yaml_file):
"""Read the YAML file and prepare it for processing.
Flatten lists, generate masks, convert port names into numbers, etc.
Args:
yaml_file: Path to the network.yaml file.
Returns:
The parsed YAML file.
Raises:
NetworkConfigException: if there is overlap between C network ports and
A or B network ports.
"""
if not yaml_file:
yaml_file = os.path.join(makani.HOME, 'avionics/network/network.yaml')
with open(yaml_file, 'r') as f:
y = yaml.full_load(f)
y['tms570s'] = _FlattenNodeList(y['tms570s'])
y['q7s'] = _FlattenNodeList(y['q7s'])
all_message_types = []
all_message_types += self._PreprocessMessageTypes('aio_messages', y)
all_message_types += self._PreprocessMessageTypes('eop_messages', y)
all_message_types += self._PreprocessMessageTypes('winch_messages', y)
for switch_name, switch in y['switches'].iteritems():
if 'config' in switch:
# Note: If config is shared and port name/number assignments are ever
# allowed to differ between config users, we'll need to clone it
# before modifying it.
config = switch['config']
for l in ['network_a', 'network_b', 'network_c', 'unicast']:
if l in config:
config[l] = _RangeParse(config[l])
if set(config.get('network_c', [])) & (
set(config.get('network_a', []))
| set(config.get('network_b', []))):
raise NetworkConfigException(
'A port on %s is assigned to network C as well as A or B.'
% switch_name)
if 'port_names' in switch:
port_names = switch['port_names']
if 'trunk' in config:
trunk = config['trunk']
trunk['ports'] = _ConvertPortNumbersInList(trunk['ports'],
port_names)
if 'unicast_learning' in trunk:
trunk['unicast_learning'] = _ConvertPortNumbersInList(
trunk['unicast_learning'], port_names)
if 'network_c_transit' in trunk:
trunk['network_c_transit'] = _ConvertPortNumbersInList(
trunk['network_c_transit'], port_names)
overrides = trunk['override_message_routes']
for k, v in overrides.iteritems():
overrides[k] = _ConvertPortNumbersInList(v, port_names)
_ValidateMessageTypes(all_message_types, overrides.iterkeys())
bandwidth = {}
for port_name, port_bandwidth in config['bandwidth'].iteritems():
if port_name == 'default':
bandwidth['default'] = port_bandwidth
else:
bandwidth[_GetPortNumber(
port_name, port_names)] = port_bandwidth
config['bandwidth'] = bandwidth
config['restrict'] = {
_GetPortNumber(k, port_names): v
for k, v in config.get('restrict', {}).iteritems()}
for v in config['restrict'].itervalues():
_ValidateMessageTypes(all_message_types, v)
self._yaml = y
return y
def _GenerateAioNodes(self):
"""Generate the list of AIO nodes.
Ensure a sorted ordering of AIO nodes as found in the YAML file.
"""
self._aio_nodes = []
self._aio_nodes_by_name = {}
self._aio_nodes_by_label = collections.defaultdict(list)
self._aio_enum_value = 0
for kind in self._yaml['aio_nodes']:
for instance in kind['instances']:
node = AioNode(self, kind, instance)
self._aio_nodes.append(node)
self._aio_nodes_by_name[node.snake_name] = node
self._aio_nodes_by_label[node.label_name].append(node)
self._aio_nodes = tuple(self._aio_nodes)
def _GenerateMessages(self, type_name, y):
"""Generate the list of AIO nodes.
Args:
type_name: Message type name (e.g., 'Aio' or 'Winch').
y: A parsed YAML file.
Raises:
NetworkConfigException: if message indices are invalid.
"""
key = type_name.lower() + '_messages'
message_types = []
message_types_by_name = {}
if key in y:
static_assignments = {m['name']: m['index'] for m in y[key]
if 'index' in m}
value = 0
used_values = set(static_assignments.values())
if len(static_assignments) != len(used_values):
raise NetworkConfigException('Duplicate message indices in %s.' % key)
if used_values and (min(used_values) < 0 or max(used_values) > 255):
raise NetworkConfigException('Invalid message indices in %s.' % key)
for message in y[key]:
if message['name'] in static_assignments:
enum_value = static_assignments[message['name']]
else:
while value in used_values:
value += 1
used_values.add(value)
enum_value = value
message_type = MessageType(self, message, type_name, enum_value)
message_types.append(message_type)
message_types_by_name[message_type.name] = message_type
setattr(self, '_%s' % key, message_types)
setattr(self, '_%s_by_name' % key, message_types_by_name)
def _ValidateAioNodeIps(self):
  """Ensure that IPs are not duplicated, and that unused IPs are declared."""
  unused_ips = set(_RangeParse(self._yaml.get('unused_ips', [])))
  next_ip = self._yaml['next_ip']
  unknown_ip = self._yaml['unknown_ip']
  used_ips = set()
  for node in self._aio_nodes:
    octet = node.ip_octet
    if octet in used_ips:
      raise NetworkConfigException('IP address %d is used more than once.'
                                   % octet)
    if octet in unused_ips:
      raise NetworkConfigException('IP address %d is used and unused.' % octet)
    if octet >= next_ip and octet != unknown_ip:
      raise NetworkConfigException('An address at or above next_ip is in '
                                   'use.')
    used_ips.add(octet)
  # Every address below next_ip (plus unknown_ip) must be accounted for,
  # either as a node IP or in the declared unused_ips ranges.
  expected_range = set(range(next_ip))
  expected_range.add(unknown_ip)
  missed_ips = used_ips.union(unused_ips).symmetric_difference(expected_range)
  if missed_ips:
    raise NetworkConfigException('Address range through "next_ip" isn\'t '
                                 'fully covered by used IPs and "unused_ips";'
                                 ' errors are %s.' % missed_ips)
def _ValidateMessageSortOrder(self, message_types):
"""Ensure that messages are unique and sorted alphabetically."""
names = [m.name for m in message_types]
sorted_names = sorted(names, key=lambda s: s.lower())
if names != sorted_names:
raise NetworkConfigException('Messages are not in alphabetical order.')
if len(set(names)) != len(names):
raise NetworkConfigException('Duplicate message entry found.')
def _ValidateAioNodeSortOrder(self):
"""Ensure that AIO nodes are unique and sorted alphabetically.
Motors are not sorted alphabetically.
Raises:
NetworkConfigException: if nodes are not in order.
"""
node_names = []
for node in self._aio_nodes:
if node.label_name != 'unknown':
node_names.append(node.enum_name)
sorted_node_names = sorted(node_names, key=lambda s: s.lower())
for a, b in zip(node_names, sorted_node_names):
if a != b and not ('Motor' in a and 'Motor' in b):
raise NetworkConfigException('Node sort order violation near "%s".' % a)
if len(set(node_names)) != len(node_names):
raise NetworkConfigException('Duplicate name AIO node entry found.')
def __init__(self, yaml_file=None):
  """Load, parse, and fully validate the network configuration."""
  parsed = self._PreprocessYamlFile(yaml_file)
  self._GenerateAioNodes()
  # Generate each message family, then validate in the same order.
  for family in ('Aio', 'Eop', 'Winch'):
    self._GenerateMessages(family, parsed)
  self._ValidateAioNodeIps()
  for messages in (self.aio_messages, self.eop_messages, self.winch_messages):
    self._ValidateMessageSortOrder(messages)
  self._ValidateAioNodeSortOrder()
@property
def aio_nodes(self):
  """Tuple of all AioNode instances, in YAML file order."""
  return self._aio_nodes
def GetAioNode(self, name):
  """Look up an AioNode by name, accepting several naming conventions.

  Accepts the enum name ('kAioNodeFoo'), a CamelCase name ('Foo'), or the
  internal snake-case name.

  Raises:
    ValueError: if no node matches.
  """
  prefix = 'kAioNode'
  if name.startswith(prefix):
    name = name[len(prefix):]
  if string_util.IsCamelCase(name):
    name = string_util.CamelToSnake(name)
  name = name.lower()
  # _aio_nodes_by_name is a plain dict, so a membership test is
  # equivalent to catching KeyError.
  if name not in self._aio_nodes_by_name:
    raise ValueError('Invalid node name: %s' % name)
  return self._aio_nodes_by_name[name]
def GetAioMessageType(self, name):
  """Return the Aio message type for 'name', with or without enum prefix."""
  prefix = 'kMessageType'
  if name.startswith(prefix):
    name = name[len(prefix):]
  return self._aio_messages_by_name[name]
@property
def aio_labels(self):
  """Sorted list of the distinct AIO node label names.

  Iterating the dict directly replaces the Python-2-only iterkeys() call;
  the result is identical under Python 2 and also works under Python 3.
  """
  return sorted(self._aio_nodes_by_label)
def GetAioNodesByLabel(self, label):
  """Return a tuple of the AioNodes carrying the given label.

  Raises:
    ValueError: if the label is unknown.
  """
  nodes_by_label = self._aio_nodes_by_label
  # Membership test rather than try/except indexing: the mapping is a
  # defaultdict, and indexing a missing label would silently create it.
  if label not in nodes_by_label:
    raise ValueError('Invalid label: %s' % label)
  return tuple(nodes_by_label[label])
@property
def messages_by_type(self):
  """Dict mapping message family name ('Aio', 'Eop', 'Winch') to its list."""
  families = {}
  families['Aio'] = self.aio_messages
  families['Eop'] = self.eop_messages
  families['Winch'] = self.winch_messages
  return families
@property
def all_messages(self):
  """Concatenation of the Aio, Eop, and Winch message lists, in that order."""
  combined = self._aio_messages + self._eop_messages
  return combined + self._winch_messages
@property
def aio_messages(self):
  """List of Aio MessageType instances (populated by _GenerateMessages)."""
  return self._aio_messages
@property
def eop_messages(self):
  """List of Eop MessageType instances (populated by _GenerateMessages)."""
  return self._eop_messages
@property
def winch_messages(self):
  """List of Winch MessageType instances (populated by _GenerateMessages)."""
  return self._winch_messages
# TODO: Split this section of network.yaml into its own file.
def GetSwitchChips(self):
  """Return the raw 'switch_chips' section of the parsed network YAML."""
  return self._yaml['switch_chips']
# TODO: Split this section of network.yaml into its own file.
def GetSwitches(self):
  """Return the raw 'switches' section of the parsed network YAML."""
  return self._yaml['switches']
def ParseGenerationFlags(argv):
  """Use gflags to parse arguments specific to network.yaml code generation.

  Args:
    argv: Full command-line argument list (argv[0] is the program name).

  Returns:
    A (FLAGS, remaining_argv) tuple, where remaining_argv holds the
    positional arguments left after flag parsing.

  Exits with status 1 (after printing usage to stderr) on a flag error.
  """
  gflags.DEFINE_string('autogen_root', makani.HOME,
                       'Root of the source tree for the output files.')
  gflags.DEFINE_string('output_dir', '.',
                       'Full path to the directory for output files.')
  gflags.DEFINE_string('network_file', None,
                       'Full path to the yaml file that describes the network.')
  try:
    argv = gflags.FLAGS(argv)
  except gflags.FlagsError as e:  # 'as' syntax works on Python 2.6+ and 3.
    sys.stderr.write('\nError: %s\n\nUsage: %s ARGS\n%s\n'
                     % (e, argv[0], gflags.FLAGS))
    sys.exit(1)
  # Bug fix: the flags were previously parsed a second time after the
  # try/except, re-processing an argv whose flags were already consumed.
  return gflags.FLAGS, argv
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.