text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import __builtin__
import __future__
import functools
import imp
import inspect
import json
from pathlib import Path, PureWindowsPath, PurePath
import optparse
import os
import os.path
import subprocess
import sys
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is a object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
BUILD_FUNCTIONS = []
class SyncCookieState(object):
"""
Process-wide state used to enable Watchman sync cookies only on
the first query issued.
"""
def __init__(self):
self.use_sync_cookies = True
class BuildContextType(object):
"""
Identifies the type of input file to the processor.
"""
BUILD_FILE = 'build_file'
INCLUDE = 'include'
class BuildFileContext(object):
"""
The build context used when processing a build file.
"""
type = BuildContextType.BUILD_FILE
def __init__(self, base_path, dirname, allow_empty_globs, watchman_client,
watchman_watch_root, watchman_project_prefix, sync_cookie_state,
watchman_error):
self.globals = {}
self.includes = set()
self.base_path = base_path
self.dirname = dirname
self.allow_empty_globs = allow_empty_globs
self.watchman_client = watchman_client
self.watchman_watch_root = watchman_watch_root
self.watchman_project_prefix = watchman_project_prefix
self.sync_cookie_state = sync_cookie_state
self.watchman_error = watchman_error
self.rules = {}
class IncludeContext(object):
"""
The build context used when processing an include.
"""
type = BuildContextType.INCLUDE
def __init__(self):
self.globals = {}
self.includes = set()
class LazyBuildEnvPartial(object):
"""Pairs a function with a build environment in which it will be executed.
Note that while the function is specified via the constructor, the build
environment must be assigned after construction, for the build environment
currently being used.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the
underlying function.
"""
def __init__(self, func):
self.func = func
self.build_env = None
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({'build_env': self.build_env})
return self.func(*args, **updated_kwargs)
def provide_for_build(func):
BUILD_FUNCTIONS.append(func)
return func
def add_rule(rule, build_env):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `{}()` at the top-level of an included file."
.format(rule['buck.type']))
# Include the base path of the BUILD file so the reader consuming this
# JSON will know which BUILD file the rule came from.
if 'name' not in rule:
raise ValueError(
'rules must contain the field \'name\'. Found %s.' % rule)
rule_name = rule['name']
if rule_name in build_env.rules:
raise ValueError('Duplicate rule definition found. Found %s and %s' %
(rule, build_env.rules[rule_name]))
rule['buck.base_path'] = build_env.base_path
build_env.rules[rule_name] = rule
class memoized(object):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
args_key = repr(args)
if args_key in self.cache:
return self.cache[args_key]
else:
value = self.func(*args)
self.cache[args_key] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
@provide_for_build
def glob(includes, excludes=[], include_dotfiles=False, build_env=None, search_base=None):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `glob()` at the top-level of an included file.")
# Ensure the user passes lists of strings rather than just a string.
assert not isinstance(includes, basestring), \
"The first argument to glob() must be a list of strings."
assert not isinstance(excludes, basestring), \
"The excludes argument must be a list of strings."
results = None
if not includes:
results = []
elif build_env.watchman_client:
try:
results = glob_watchman(
includes,
excludes,
include_dotfiles,
build_env.base_path,
build_env.watchman_watch_root,
build_env.watchman_project_prefix,
build_env.sync_cookie_state,
build_env.watchman_client)
except build_env.watchman_error, e:
print >>sys.stderr, 'Watchman error, falling back to slow glob: ' + str(e)
try:
build_env.watchman_client.close()
except:
pass
build_env.watchman_client = None
if results is None:
if search_base is None:
search_base = Path(build_env.dirname)
results = glob_internal(
includes,
excludes,
include_dotfiles,
search_base)
assert build_env.allow_empty_globs or results, (
"glob(includes={includes}, excludes={excludes}, include_dotfiles={include_dotfiles}) " +
"returned no results. (allow_empty_globs is set to false in the Buck " +
"configuration)").format(
includes=includes,
excludes=excludes,
include_dotfiles=include_dotfiles)
return results
def merge_maps(*header_maps):
result = {}
for header_map in header_maps:
for key in header_map:
if key in result and result[key] != header_map[key]:
assert False, 'Conflicting header files in header search paths. ' + \
'"%s" maps to both "%s" and "%s".' \
% (key, result[key], header_map[key])
result[key] = header_map[key]
return result
def single_subdir_glob(dirpath, glob_pattern, excludes=[], prefix=None, build_env=None,
search_base=None):
results = {}
files = glob([os.path.join(dirpath, glob_pattern)],
excludes=excludes,
build_env=build_env,
search_base=search_base)
for f in files:
if dirpath:
key = f[len(dirpath) + 1:]
else:
key = f
if prefix:
# `f` is a string, but we need to create correct platform-specific Path.
# Using Path straight away won't work because it will use host's class, which is Posix
# on OS X. For the same reason we can't use os.path.join.
# Here we try to understand if we're running Windows test, then use WindowsPath
# to build up the key with prefix, allowing test to pass.
cls = PureWindowsPath if "\\" in f else PurePath
key = str(cls(prefix) / cls(key))
results[key] = f
return results
@provide_for_build
def subdir_glob(glob_specs, excludes=[], prefix=None, build_env=None, search_base=None):
"""
Given a list of tuples, the form of (relative-sub-directory, glob-pattern),
return a dict of sub-directory relative paths to full paths. Useful for
defining header maps for C/C++ libraries which should be relative the given
sub-directory.
If prefix is not None, prepends it it to each key in the dictionary.
"""
results = []
for dirpath, glob_pattern in glob_specs:
results.append(
single_subdir_glob(dirpath, glob_pattern, excludes, prefix, build_env, search_base))
return merge_maps(*results)
def format_watchman_query_params(includes, excludes, include_dotfiles, relative_root):
match_exprs = ["allof", "exists", ["anyof", ["type", "f"], ["type", "l"]]]
match_flags = {}
if include_dotfiles:
match_flags["includedotfiles"] = True
if includes:
match_exprs.append(
["anyof"] + [["match", i, "wholename", match_flags] for i in includes])
if excludes:
match_exprs.append(
["not",
["anyof"] + [["match", x, "wholename", match_flags] for x in excludes]])
return {
"relative_root": relative_root,
# Explicitly pass an empty path so Watchman queries only the tree of files
# starting at base_path.
"path": [''],
"fields": ["name"],
"expression": match_exprs,
}
@memoized
def glob_watchman(includes, excludes, include_dotfiles, base_path, watchman_watch_root,
watchman_project_prefix, sync_cookie_state, watchman_client):
assert includes, "The includes argument must be a non-empty list of strings."
if watchman_project_prefix:
relative_root = os.path.join(watchman_project_prefix, base_path)
else:
relative_root = base_path
query_params = format_watchman_query_params(
includes, excludes, include_dotfiles, relative_root)
# Sync cookies cause a massive overhead when issuing thousands of
# glob queries. Only enable them (by not setting sync_timeout to 0)
# for the very first request issued by this process.
if sync_cookie_state.use_sync_cookies:
sync_cookie_state.use_sync_cookies = False
else:
query_params["sync_timeout"] = 0
query = ["query", watchman_watch_root, query_params]
res = watchman_client.query(*query)
if res.get('warning'):
print >> sys.stderr, 'Watchman warning from query {}: {}'.format(
query,
res.get('warning'))
result = res.get('files', [])
return sorted(result)
def glob_internal(includes, excludes, include_dotfiles, search_base):
def includes_iterator():
for pattern in includes:
for path in search_base.glob(pattern):
# TODO(user): Handle hidden files on Windows.
if path.is_file() and (include_dotfiles or not path.name.startswith('.')):
yield path.relative_to(search_base)
def is_special(pat):
return "*" in pat or "?" in pat or "[" in pat
non_special_excludes = set()
match_excludes = set()
for pattern in excludes:
if is_special(pattern):
match_excludes.add(pattern)
else:
non_special_excludes.add(pattern)
def exclusion(path):
if path.as_posix() in non_special_excludes:
return True
for pattern in match_excludes:
result = path.match(pattern, match_entire=True)
if result:
return True
return False
return sorted(set([str(p) for p in includes_iterator() if not exclusion(p)]))
@provide_for_build
def get_base_path(build_env=None):
"""Get the base path to the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
Returns: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
"""
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `get_base_path()` at the top-level of an included file.")
return build_env.base_path
@provide_for_build
def add_deps(name, deps=[], build_env=None):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `add_deps()` at the top-level of an included file.")
if name not in build_env.rules:
raise ValueError(
'Invoked \'add_deps\' on non-existent rule %s.' % name)
rule = build_env.rules[name]
if 'deps' not in rule:
raise ValueError(
'Invoked \'add_deps\' on rule %s that has no \'deps\' field'
% name)
rule['deps'] = rule['deps'] + deps
class BuildFileProcessor(object):
def __init__(self, project_root, watchman_watch_root, watchman_project_prefix, build_file_name,
allow_empty_globs, watchman_client, watchman_error, implicit_includes=[],
extra_funcs=[]):
self._cache = {}
self._build_env_stack = []
self._sync_cookie_state = SyncCookieState()
self._project_root = project_root
self._watchman_watch_root = watchman_watch_root
self._watchman_project_prefix = watchman_project_prefix
self._build_file_name = build_file_name
self._implicit_includes = implicit_includes
self._allow_empty_globs = allow_empty_globs
self._watchman_client = watchman_client
self._watchman_error = watchman_error
lazy_functions = {}
for func in BUILD_FUNCTIONS + extra_funcs:
func_with_env = LazyBuildEnvPartial(func)
lazy_functions[func.__name__] = func_with_env
self._functions = lazy_functions
def _merge_globals(self, mod, dst):
"""
Copy the global definitions from one globals dict to another.
Ignores special attributes and attributes starting with '_', which
typically denote module-level private attributes.
"""
hidden = set([
'include_defs',
])
keys = getattr(mod, '__all__', mod.__dict__.keys())
for key in keys:
if not key.startswith('_') and key not in hidden:
dst[key] = mod.__dict__[key]
def _update_functions(self, build_env):
"""
Updates the build functions to use the given build context when called.
"""
for function in self._functions.itervalues():
function.build_env = build_env
def install_builtins(self, namespace):
"""
Installs the build functions, by their name, into the given namespace.
"""
for name, function in self._functions.iteritems():
namespace[name] = function.invoke
def _get_include_path(self, name):
"""
Resolve the given include def name to a full path.
"""
# Find the path from the include def name.
if not name.startswith('//'):
raise ValueError(
'include_defs argument "%s" must begin with //' % name)
relative_path = name[2:]
return os.path.join(self._project_root, relative_path)
def _include_defs(self, name, implicit_includes=[]):
"""
Pull the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._build_env_stack[-1]
# Resolve the named include to its path and process it to get its
# build context and module.
path = self._get_include_path(name)
inner_env, mod = self._process_include(
path,
implicit_includes=implicit_includes)
# Look up the caller's stack frame and merge the include's globals
# into it's symbol table.
frame = inspect.currentframe()
while frame.f_globals['__name__'] == __name__:
frame = frame.f_back
self._merge_globals(mod, frame.f_globals)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(path)
build_env.includes.update(inner_env.includes)
def _push_build_env(self, build_env):
"""
Set the given build context as the current context.
"""
self._build_env_stack.append(build_env)
self._update_functions(build_env)
def _pop_build_env(self):
"""
Restore the previous build context as the current context.
"""
self._build_env_stack.pop()
if self._build_env_stack:
self._update_functions(self._build_env_stack[-1])
def _process(self, build_env, path, implicit_includes=[]):
"""
Process a build file or include at the given path.
"""
# First check the cache.
cached = self._cache.get(path)
if cached is not None:
return cached
# Install the build context for this input as the current context.
self._push_build_env(build_env)
# The globals dict that this file will be executed under.
default_globals = {}
# Install the 'include_defs' function into our global object.
default_globals['include_defs'] = functools.partial(
self._include_defs,
implicit_includes=implicit_includes)
# If any implicit includes were specified, process them first.
for include in implicit_includes:
include_path = self._get_include_path(include)
inner_env, mod = self._process_include(include_path)
self._merge_globals(mod, default_globals)
build_env.includes.add(include_path)
build_env.includes.update(inner_env.includes)
# Build a new module for the given file, using the default globals
# created above.
module = imp.new_module(path)
module.__file__ = path
module.__dict__.update(default_globals)
with open(path) as f:
contents = f.read()
# Enable absolute imports. This prevents the compiler from trying to
# do a relative import first, and warning that this module doesn't
# exist in sys.modules.
future_features = __future__.absolute_import.compiler_flag
code = compile(contents, path, 'exec', future_features, 1)
exec(code, module.__dict__)
# Restore the previous build context.
self._pop_build_env()
self._cache[path] = build_env, module
return build_env, module
def _process_include(self, path, implicit_includes=[]):
"""
Process the include file at the given path.
"""
build_env = IncludeContext()
return self._process(
build_env,
path,
implicit_includes=implicit_includes)
def _process_build_file(self, path, implicit_includes=[]):
"""
Process the build file at the given path.
"""
# Create the build file context, including the base path and directory
# name of the given path.
relative_path_to_build_file = os.path.relpath(
path, self._project_root).replace('\\', '/')
len_suffix = -len('/' + self._build_file_name)
base_path = relative_path_to_build_file[:len_suffix]
dirname = os.path.dirname(path)
build_env = BuildFileContext(
base_path,
dirname,
self._allow_empty_globs,
self._watchman_client,
self._watchman_watch_root,
self._watchman_project_prefix,
self._sync_cookie_state,
self._watchman_error)
return self._process(
build_env,
path,
implicit_includes=implicit_includes)
def process(self, path):
"""
Process a build file returning a dict of it's rules and includes.
"""
build_env, mod = self._process_build_file(
os.path.join(self._project_root, path),
implicit_includes=self._implicit_includes)
values = build_env.rules.values()
values.append({"__includes": [path] + sorted(build_env.includes)})
return values
def cygwin_adjusted_path(path):
if sys.platform == 'cygwin':
return subprocess.check_output(['cygpath', path]).rstrip()
else:
return path
# Inexplicably, this script appears to run faster when the arguments passed
# into it are absolute paths. However, we want the "buck.base_path" property
# of each rule to be printed out to be the base path of the build target that
# identifies the rule. That means that when parsing a BUILD file, we must know
# its path relative to the root of the project to produce the base path.
#
# To that end, the first argument to this script must be an absolute path to
# the project root. It must be followed by one or more absolute paths to
# BUILD files under the project root. If no paths to BUILD files are
# specified, then it will traverse the project root for BUILD files, excluding
# directories of generated files produced by Buck.
#
# All of the build rules that are parsed from the BUILD files will be printed
# to stdout by a JSON parser. That means that printing out other information
# for debugging purposes will likely break the JSON parsing, so be careful!
def main():
# Our parent expects to read JSON from our stdout, so if anyone
# uses print, buck will complain with a helpful "but I wanted an
# array!" message and quit. Redirect stdout to stderr so that
# doesn't happen. Actually dup2 the file handle so that writing
# to file descriptor 1, os.system, and so on work as expected too.
to_parent = os.fdopen(os.dup(sys.stdout.fileno()), 'a')
os.dup2(sys.stderr.fileno(), sys.stdout.fileno())
parser = optparse.OptionParser()
parser.add_option(
'--project_root',
action='store',
type='string',
dest='project_root')
parser.add_option(
'--build_file_name',
action='store',
type='string',
dest="build_file_name")
parser.add_option(
'--allow_empty_globs',
action='store_true',
dest='allow_empty_globs',
help='Tells the parser not to raise an error when glob returns no results.')
parser.add_option(
'--use_watchman_glob',
action='store_true',
dest='use_watchman_glob',
help='Invokes `watchman query` to get lists of files instead of globbing in-process.')
parser.add_option(
'--watchman_watch_root',
action='store',
type='string',
dest='watchman_watch_root',
help='Path to root of watchman watch as returned by `watchman watch-project`.')
parser.add_option(
'--watchman_project_prefix',
action='store',
type='string',
dest='watchman_project_prefix',
help='Relative project prefix as returned by `watchman watch-project`.')
parser.add_option(
'--watchman_query_timeout_ms',
action='store',
type='int',
dest='watchman_query_timeout_ms',
help='Maximum time in milliseconds to wait for watchman query to respond.')
parser.add_option(
'--include',
action='append',
dest='include')
(options, args) = parser.parse_args()
# Even though project_root is absolute path, it may not be concise. For
# example, it might be like "C:\project\.\rule".
#
# Under cygwin, the project root will be invoked from buck as C:\path, but
# the cygwin python uses UNIX-style paths. They can be converted using
# cygpath, which is necessary because abspath will treat C:\path as a
# relative path.
options.project_root = cygwin_adjusted_path(options.project_root)
project_root = os.path.abspath(options.project_root)
watchman_client = None
watchman_error = None
output_format = 'JSON'
output_encode = lambda val: json.dumps(val, sort_keys=True)
if options.use_watchman_glob:
import pywatchman
client_args = {}
if options.watchman_query_timeout_ms is not None:
# pywatchman expects a timeout as a nonnegative floating-point
# value in seconds.
client_args['timeout'] = max(0.0, options.watchman_query_timeout_ms / 1000.0)
watchman_client = pywatchman.client(**client_args)
watchman_error = pywatchman.WatchmanError
try:
import pywatchman.bser as bser
except ImportError, e:
import pywatchman.pybser as bser
output_format = 'BSER'
output_encode = lambda val: bser.dumps(val)
buildFileProcessor = BuildFileProcessor(
project_root,
options.watchman_watch_root,
options.watchman_project_prefix,
options.build_file_name,
options.allow_empty_globs,
watchman_client,
watchman_error,
implicit_includes=options.include or [])
buildFileProcessor.install_builtins(__builtin__.__dict__)
to_parent.write(output_format + '\n')
to_parent.flush()
for build_file in args:
build_file = cygwin_adjusted_path(build_file)
values = buildFileProcessor.process(build_file)
to_parent.write(output_encode(values))
to_parent.flush()
# "for ... in sys.stdin" in Python 2.x hangs until stdin is closed.
for build_file in iter(sys.stdin.readline, ''):
build_file = cygwin_adjusted_path(build_file)
values = buildFileProcessor.process(build_file.rstrip())
to_parent.write(output_encode(values))
to_parent.flush()
# Python tries to flush/close stdout when it quits, and if there's a dead
# pipe on the other end, it will spit some warnings to stderr. This breaks
# tests sometimes. Prevent that by explicitly catching the error.
try:
to_parent.close()
except IOError:
pass
|
{
"content_hash": "0a3aedcfe3a7a7de3dc33e5f528a973c",
"timestamp": "",
"source": "github",
"line_count": 749,
"max_line_length": 99,
"avg_line_length": 35.18825100133511,
"alnum_prop": 0.6202382759144028,
"repo_name": "artiya4u/buck",
"id": "0c82e8c890e52eb6bcd9a78c9fd8e9f5ad13d982",
"size": "26534",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/com/facebook/buck/json/buck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "87"
},
{
"name": "Batchfile",
"bytes": "683"
},
{
"name": "C",
"bytes": "245856"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "3765"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "623"
},
{
"name": "Go",
"bytes": "419"
},
{
"name": "Groff",
"bytes": "440"
},
{
"name": "HTML",
"bytes": "4938"
},
{
"name": "IDL",
"bytes": "128"
},
{
"name": "Java",
"bytes": "10529834"
},
{
"name": "JavaScript",
"bytes": "931231"
},
{
"name": "Lex",
"bytes": "2442"
},
{
"name": "Makefile",
"bytes": "1791"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "2956"
},
{
"name": "Objective-C",
"bytes": "77169"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "143"
},
{
"name": "Python",
"bytes": "202866"
},
{
"name": "Rust",
"bytes": "938"
},
{
"name": "Shell",
"bytes": "31126"
},
{
"name": "Smalltalk",
"bytes": "438"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
}
|
"""
raven.utils.stacks
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import inspect
import re
import sys
import warnings
from raven.utils.serializer import transform
from raven.utils import six
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
# Traceback (most recent call last):
# File "/Users/dcramer/Development/django-sentry/sentry/client/handlers.py", line 31, in emit
# get_client().create_from_record(record, request=request)
# File "/Users/dcramer/Development/django-sentry/sentry/client/base.py", line 325, in create_from_record
# data['__sentry__']['frames'] = varmap(shorten, get_stack_info(stack))
# File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 112, in get_stack_info
# pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
# File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 24, in get_lines_from_file
# source = loader.get_source(module_name)
# File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 287, in get_source
# fullname = self._fix_name(fullname)
# File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 262, in _fix_name
# "module %s" % (self.fullname, fullname))
# ImportError: Loader for module cProfile cannot handle module __main__
source = None
if source is not None:
source = source.splitlines()
if source is None:
try:
f = open(filename, 'rb')
try:
source = f.readlines()
finally:
f.close()
except (OSError, IOError):
pass
if source is None:
return None, None, None
encoding = 'utf8'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = _coding_re.search(line.decode('utf8')) # let's assume utf8
if match:
encoding = match.group(1)
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = min(lineno + 1 + context_lines, len(source))
try:
pre_context = [line.strip('\r\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\r\n')
post_context = [line.strip('\r\n') for line in source[(lineno + 1):upper_bound]]
except IndexError:
# the file may have changed since it was loaded into memory
return None, None, None
return pre_context, context_line, post_context
def label_from_frame(frame):
module = frame.get('module') or '?'
function = frame.get('function') or '?'
if module == function == '?':
return ''
return '%s in %s' % (module, function)
def get_culprit(frames, *args, **kwargs):
# We iterate through each frame looking for a deterministic culprit
# When one is found, we mark it as last "best guess" (best_guess) and then
# check it against ``exclude_paths``. If it isn't listed, then we
# use this option. If nothing is found, we use the "best guess".
if args or kwargs:
warnings.warn('get_culprit no longer does application detection')
best_guess = None
culprit = None
for frame in reversed(frames):
culprit = label_from_frame(frame)
if not culprit:
culprit = None
continue
if frame.get('in_app'):
return culprit
elif not best_guess:
best_guess = culprit
elif best_guess:
break
# Return either the best guess or the last frames call
return best_guess or culprit
def _getitem_from_frame(f_locals, key, default=None):
"""
f_locals is not guaranteed to have .get(), but it will always
support __getitem__. Even if it doesn't, we return ``default``.
"""
try:
return f_locals[key]
except Exception:
return default
def to_dict(dictish):
"""
Given something that closely resembles a dictionary, we attempt
to coerce it into a propery dictionary.
"""
if hasattr(dictish, 'iterkeys'):
m = dictish.iterkeys
elif hasattr(dictish, 'keys'):
m = dictish.keys
else:
raise ValueError(dictish)
return dict((k, dictish[k]) for k in m())
def iter_traceback_frames(tb):
"""
Given a traceback object, it will iterate over all
frames that do not contain the ``__traceback_hide__``
local variable.
"""
while tb:
# support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
f_locals = getattr(tb.tb_frame, 'f_locals', {})
if not _getitem_from_frame(f_locals, '__traceback_hide__'):
yield tb.tb_frame, getattr(tb, 'tb_lineno', None)
tb = tb.tb_next
def iter_stack_frames(frames=None):
"""
Given an optional list of frames (defaults to current stack),
iterates over all frames that do not contain the ``__traceback_hide__``
local variable.
"""
if not frames:
frames = inspect.stack()[1:]
for frame, lineno in ((f[0], f[2]) for f in frames):
f_locals = getattr(frame, 'f_locals', {})
if _getitem_from_frame(f_locals, '__traceback_hide__'):
continue
yield frame, lineno
def get_frame_locals(frame, transformer=transform, max_var_size=4096):
f_locals = getattr(frame, 'f_locals', None)
if not f_locals:
return None
if not isinstance(f_locals, dict):
# XXX: Genshi (and maybe others) have broken implementations of
# f_locals that are not actually dictionaries
try:
f_locals = to_dict(f_locals)
except Exception:
return None
f_vars = {}
f_size = 0
for k, v in six.iteritems(f_locals):
v = transformer(v)
v_size = len(repr(v))
if v_size + f_size < 4096:
f_vars[k] = v
f_size += v_size
return f_vars
def slim_frame_data(frames, frame_allowance=25):
"""
Removes various excess metadata from middle frames which go beyond
``frame_allowance``.
Returns ``frames``.
"""
frames_len = len(frames)
if frames_len <= frame_allowance:
return frames
half_max = frame_allowance / 2
for n in xrange(half_max, frames_len - half_max):
# remove heavy components
frames[n].pop('vars', None)
frames[n].pop('pre_context', None)
frames[n].pop('post_context', None)
return frames
def get_stack_info(frames, transformer=transform, capture_locals=True,
frame_allowance=25):
"""
Given a list of frames, returns a list of stack information
dictionary objects that are JSON-ready.
We have to be careful here as certain implementations of the
_Frame class do not contain the necessary data to lookup all
of the information we want.
"""
__traceback_hide__ = True # NOQA
result = []
for frame_info in frames:
# Old, terrible API
if isinstance(frame_info, (list, tuple)):
frame, lineno = frame_info
else:
frame = frame_info
lineno = frame_info.f_lineno
# Support hidden frames
f_locals = getattr(frame, 'f_locals', {})
if _getitem_from_frame(f_locals, '__traceback_hide__'):
continue
f_globals = getattr(frame, 'f_globals', {})
f_code = getattr(frame, 'f_code', None)
if f_code:
abs_path = frame.f_code.co_filename
function = frame.f_code.co_name
else:
abs_path = None
function = None
loader = _getitem_from_frame(f_globals, '__loader__')
module_name = _getitem_from_frame(f_globals, '__name__')
if lineno:
lineno -= 1
if lineno is not None and abs_path:
pre_context, context_line, post_context = get_lines_from_file(abs_path, lineno, 5, loader, module_name)
else:
pre_context, context_line, post_context = None, None, None
# Try to pull a relative file path
# This changes /foo/site-packages/baz/bar.py into baz/bar.py
try:
base_filename = sys.modules[module_name.split('.', 1)[0]].__file__
filename = abs_path.split(base_filename.rsplit('/', 2)[0], 1)[-1].lstrip("/")
except:
filename = abs_path
if not filename:
filename = abs_path
frame_result = {
'abs_path': abs_path,
'filename': filename,
'module': module_name or None,
'function': function or '<unknown>',
'lineno': lineno + 1,
}
if capture_locals:
f_vars = get_frame_locals(frame, transformer=transformer)
if f_vars:
frame_result['vars'] = f_vars
if context_line is not None:
frame_result.update({
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
})
result.append(frame_result)
stackinfo = {
'frames': slim_frame_data(result, frame_allowance=frame_allowance),
}
return stackinfo
|
{
"content_hash": "7e97ae3c7c5071d9efb82a2726314ce3",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 141,
"avg_line_length": 33.20195439739414,
"alnum_prop": 0.5897184342195625,
"repo_name": "ronaldevers/raven-python",
"id": "75a88d3927e28994c81168e81fc8af034495ab48",
"size": "10193",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "raven/utils/stacks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "52"
},
{
"name": "Makefile",
"bytes": "566"
},
{
"name": "Python",
"bytes": "343740"
},
{
"name": "Shell",
"bytes": "489"
}
],
"symlink_target": ""
}
|
class CaptchaBackend(object):
def setup(self, **kwargs):
pass
|
{
"content_hash": "343feff299eb00685eff070a98bae4d3",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 30,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6351351351351351,
"repo_name": "subeax/grab",
"id": "0d9efb64eb0f6ac0d2265c3ff52e1b89a50e493f",
"size": "74",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grab/captcha/backend/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "976"
},
{
"name": "Perl",
"bytes": "45"
},
{
"name": "Python",
"bytes": "739023"
},
{
"name": "Shell",
"bytes": "317"
}
],
"symlink_target": ""
}
|
from api.decorators import api_view, request_data_defaultdc
from api.permissions import IsAnyDcTemplateAdmin
from api.template.base.api_views import TemplateView
__all__ = ('template_list', 'template_manage')
@api_view(('GET',))
@request_data_defaultdc(permissions=(IsAnyDcTemplateAdmin,))
def template_list(request, data=None):
"""
List (:http:get:`GET </template>`) all server templates.
.. http:get:: /template
:DC-bound?:
* |dc-no|
:Permissions:
* |TemplateAdmin|
:Asynchronous?:
* |async-no|
:arg data.full: Return list of objects with all server template details (default: false)
:type data.full: boolean
:arg data.extended: Return list of objects with extended server template details (default: false)
:type data.extended: boolean
:arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name``, ``created`` (default: ``name``)
:type data.order_by: string
:status 200: SUCCESS
:status 403: Forbidden
"""
return TemplateView(request, None, data).get(many=True)
@api_view(('GET', 'POST', 'PUT', 'DELETE'))
@request_data_defaultdc(permissions=(IsAnyDcTemplateAdmin,))
def template_manage(request, name, data=None):
"""
Show (:http:get:`GET </template/(name)>`), create (:http:post:`POST </template/(name)>`)
update (:http:put:`PUT </template/(name)>`) or delete (:http:delete:`DELETE </template/(name)>`)
a server template.
.. note:: Server templates are only loosely validated during creation and updating. Whether a server template \
will be correctly applied to a virtual server depends on various circumstances during server definition and deployment \
(e.g. availability of compute nodes, storages, networks, server images, users and other resources \
in a virtual datacenter).
.. http:get:: /template/(name)
:DC-bound?:
* |dc-yes| - ``dc_bound=true``
* |dc-no| - ``dc_bound=false``
:Permissions:
* |TemplateAdmin| - ``dc_bound=true``
* |SuperAdmin| - ``dc_bound=false``
:Asynchronous?:
* |async-no|
:arg name: **required** - Server template name
:type name: string
:arg data.extended: Display extended server template details (default: false)
:type data.extended: boolean
:status 200: SUCCESS
:status 403: Forbidden
:status 404: Template not found
.. http:post:: /template/(name)
:DC-bound?:
* |dc-yes| - ``dc_bound=true``
* |dc-no| - ``dc_bound=false``
:Permissions:
* |TemplateAdmin| - ``dc_bound=true``
* |SuperAdmin| - ``dc_bound=false``
:Asynchronous?:
* |async-no|
:arg name: **required** - Server template name
:type name: string
:arg data.alias: Short server template name (default: ``name``)
:type data.alias: string
:arg data.access: Access type (1 - Public, 3 - Private, 4 - Deleted) (default: 3)
:type data.access: integer
:arg data.owner: User that owns the server template (default: logged in user)
:type data.owner: string
:arg data.desc: Template image description
:type data.desc: string
:arg data.ostype: Operating system type (null - all OS types, 1 - Linux VM, 2 - SunOS VM, 3 - BSD VM, \
4 - Windows VM, 5 - SunOS Zone, 6 - Linux Zone) (default: null)
:type data.ostype: integer
:arg data.dc_bound: Whether the server template is bound to a datacenter (requires |SuperAdmin| permission) \
(default: true)
:type data.dc_bound: boolean
:arg data.dc: Name of the datacenter the server template will be attached to (**required** if DC-bound)
:type data.dc: string
:arg data.vm_define: :http:get:`Server definition object </vm/(hostname_or_uuid)/define>` (default: {})
:type data.vm_define: object
:arg data.vm_define_disk: List of \
:http:get:`server disk definition objects </vm/(hostname_or_uuid)/define/disk/(disk_id)>` (default: [])
:type data.vm_define_disk: array
:arg data.vm_define_nic: List of \
:http:get:`server NIC definition objects </vm/(hostname_or_uuid)/define/nic/(nic_id)>` (default: [])
:type data.vm_define_nic: array
:arg data.vm_define_snapshot: List of \
:http:get:`server snapshot definition objects </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>` (default: [])
:type data.vm_define_snapshot: array
:arg data.vm_define_backup: List of \
:http:get:`server backup definition objects </vm/(hostname_or_uuid)/define/backup/(bkpdef)>` (default: [])
:type data.vm_define_backup: array
:status 201: SUCCESS
:status 400: FAILURE
:status 403: Forbidden
:status 404: Datacenter not found
:status 406: Template already exists
.. http:put:: /template/(name)
:DC-bound?:
* |dc-yes| - ``dc_bound=true``
* |dc-no| - ``dc_bound=false``
:Permissions:
* |TemplateAdmin| - ``dc_bound=true``
* |SuperAdmin| - ``dc_bound=false``
:Asynchronous?:
* |async-no|
:arg name: **required** - Server template name
:type name: string
:arg data.alias: Short server template name
:type data.alias: string
:arg data.access: Access type (1 - Public, 3 - Private, 4 - Deleted)
:type data.access: integer
:arg data.owner: User that owns the server template
:type data.owner: string
:arg data.desc: Template image description
:type data.desc: string
:arg data.ostype: Operating system type (null - all OS types, 1 - Linux VM, 2 - SunOS VM, 3 - BSD VM, \
4 - Windows VM, 5 - SunOS Zone, 6 - Linux Zone)
:type data.ostype: integer
:arg data.dc_bound: Whether the server template is bound to a datacenter (requires |SuperAdmin| permission)
:type data.dc_bound: boolean
:arg data.vm_define: :http:get:`Server definition object </vm/(hostname_or_uuid)/define>`
:type data.vm_define: object
:arg data.vm_define_disk: List of \
:http:get:`server disk definition objects </vm/(hostname_or_uuid)/define/disk/(disk_id)>`
:type data.vm_define_disk: array
:arg data.vm_define_nic: List of \
:http:get:`server NIC definition objects </vm/(hostname_or_uuid)/define/nic/(nic_id)>`
:type data.vm_define_nic: array
:arg data.vm_define_snapshot: List of \
:http:get:`server snapshot definition objects </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>`
:type data.vm_define_snapshot: array
:arg data.vm_define_backup: List of \
:http:get:`server backup definition objects </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`
:type data.vm_define_backup: array
:status 200: SUCCESS
:status 400: FAILURE
:status 403: Forbidden
:status 404: Template not found
.. http:delete:: /template/(name)
.. note:: A server template cannot be deleted when it is used by even one virtual server. In order to disable \
further use of such a server template, the template can be marked as deleted by \
:http:put:`changing its access property to deleted (4) </template/(name)>`.
:DC-bound?:
* |dc-yes| - ``dc_bound=true``
* |dc-no| - ``dc_bound=false``
:Permissions:
* |TemplateAdmin| - ``dc_bound=true``
* |SuperAdmin| - ``dc_bound=false``
:Asynchronous?:
* |async-no|
:arg name: **required** - Server template name
:type name: string
:status 200: SUCCESS
:status 400: FAILURE
:status 403: Forbidden
:status 404: Template not found
:status 428: Template is used by some VMs
"""
return TemplateView(request, name, data).response()
|
{
"content_hash": "6b77a78278d541835c73f7a0a20b5b31",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 120,
"avg_line_length": 44.2,
"alnum_prop": 0.6187782805429864,
"repo_name": "erigones/esdc-ce",
"id": "c11f8665602adc0c30c894dcda30a036bbb3a6eb",
"size": "7956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/template/base/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
}
|
import sys
import re
from itertools import chain
_PY3 = sys.version_info[0] > 2
if _PY3:
basestring = unicode = str
def is_string(v):
return isinstance(v, basestring)
def stringify(v):
"""
Return a string. If it's already a string, just return that. Otherwise,
stringify it. Under Python 3, this makes perfect sense. Under Python 2,
if the string contains Unicode octets (e.g. UTF-8 bytes, because it's
really a byte string pretending to be a full string), casting to unicode
isn't safe. Solution: Use only for Unicode strings.
"""
return v if isinstance(v, basestring) else unicode(v)
def halfstr(s):
"""
Split the given string, returning each half in a list.
"""
length = len(s)
half = length // 2
if half * 2 != length:
raise ValueError('string {0!r} not of even length'.format(s))
return [s[:half], s[half:]]
def listing(l):
"""
Return a list from either None, an iterable, or a whitespace-separated
string listing.
"""
if not l:
return []
return l.strip().split() if isinstance(l, basestring) else list(l)
def is_sequence(arg):
"""
Is a list, set etc. Not a string.
"""
if hasattr(arg, "__iter__") or hasattr(arg, "__getitem__"):
if not hasattr(arg, "strip"):
return True
return False
def blanknone(v):
"""
Return a value, or empty string if it's None.
"""
return '' if v is None else v
def pad(seq, n, fillvalue=None):
"""
You want a sequence at least n items long, say for an unpacking
operation. You might not have n items. This will right-pad your
sequence into one of the desired length, using your
desired fillvalue (by default, None).
"""
length = len(seq)
if length == n:
return seq
else:
needed = n - length
return seq + type(seq)([fillvalue] * needed)
# itertools equivalent:
# return chain(iterable, repeat(fillvalue, n-len(iterable)))
def parse_css_selector(spec):
"""
Parse a CSS-style selector (similar to what jQuery and CSS use).
Returns an dict with tag_, id, and class, and other key, value
fields.
"""
if spec is None:
return {}
# use regular expressions to parse the selector spec
tagnames = re.findall(r'^(\w+)', spec)
classes = re.findall(r'\.([\w\-]+)', spec)
ids = re.findall(r'\#([\w\-]+)', spec)
attrs = re.findall(r'\[([\w-]+=[^]]*)\]', spec)
# make some basic assertions
assert len(tagnames) <= 1
assert len(ids) <= 1
# construct the result dict
atts = {}
if ids:
atts['id'] = ids[0]
if classes:
atts['class'] = ' '.join(classes)
if tagnames:
atts['_tag'] = tagnames[0]
for a in attrs:
k, v = a.split('=')
atts[k] = v
return atts
def style_attribs(spec):
"""
If spec is a dictionary, assume it's the one we want. Else,
parse it.
"""
if spec is None:
return {}
elif isinstance(spec, dict):
return spec
else:
return parse_css_selector(spec)
def combine_classes(c1=None, c2=None):
c1list = (c1 or '').strip().split()
c2list = (c2 or '').strip().split()
if not (c1list or c2list):
return None
classes = []
seen = set()
for c in chain(c1list, c2list):
if c not in seen:
classes.append(c)
seen.add(c)
return ' '.join(classes)
def update_style_dict(d, other, reverse_class=True):
"""
Update one style dict d, with the other style dict.
Very like a dictionary update, except that the class attribute
can be multi-valued and additive, not replaced.
reverse_class, if True, means to process the other's
class value (if any) before the receiving dictionary's.
This may seem an odd, even wrong, choice. It's actually
the right choice in the case one is applying a series of
updates and wishes the last update to be the higest
priority, over-rides all others update (but still have its
class names to appear first in the list). If you don't like
this choice, set it to False and no harm done. Either way
is proper CSS.
"""
if other is None or len(other) == 0: # nothing to merge; quick exit
return
dclass = d.get('class')
oclass = other.get('class')
d.update(other)
if dclass or oclass:
clslist = [oclass, dclass] if reverse_class else [dclass, oclass]
d['class'] = combine_classes(*clslist)
|
{
"content_hash": "b707f88a0bdde7a05e3bc32978eb6772",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 76,
"avg_line_length": 27.349397590361445,
"alnum_prop": 0.608590308370044,
"repo_name": "jonathaneunice/quoter",
"id": "6626c738e153ab0469dab6279749568600d97266",
"size": "4541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quoter/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55041"
}
],
"symlink_target": ""
}
|
import requests
from bs4 import BeautifulSoup
import processor
from datetime import datetime
def parse( url ):
r = requests.get( url )
if r.status_code == 404:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
r.encoding = 'UTF-8'
soup = BeautifulSoup( r.text, "html.parser" )
article = soup.find( 'article' )
if article == None:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
processor.decompose_all( article.find_all( 'script' ) )
datetime_list = processor.collect_datetime_objects( article.find_all( 'time' ), 'datetime' )
author = processor.collect_text( article.find( class_ = 'posted-on' ) )
author = author.replace( ' |', '' )
processor.decompose( article.find( class_ = 'entry-meta' ) )
title = processor.collect_text( article.find( class_ = 'entry-title' ) )
ingress = processor.collect_text( article.find( class_ = 'entry-content__ingress' ) )
processor.decompose( article.find( class_ = 'entry-content__ingress' ) )
images = processor.collect_images( article.find_all( 'img' ), 'src', '' )
captions = processor.collect_image_captions( article.find_all( class_ = 'entry-header__caption' ) )
text = processor.collect_text( article.find( class_ = 'entry-content' ) )
return processor.create_dictionary('Verkkouutiset', url, r.status_code, [u''], datetime_list, author, title, ingress, text, images, captions)
if __name__ == '__main__':
parse("http://www.verkkouutiset.fi/talous/ammattisijoittajan_neuvot-33352", file('verkkouutiset.txt', 'w'))
|
{
"content_hash": "7e0499fb9ee802b5dfdce25767daaf50",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 142,
"avg_line_length": 40.125,
"alnum_prop": 0.6672897196261682,
"repo_name": "HIIT/mediacollection",
"id": "38ba0c9f5e3fa5b510b600ed2e81a8dfdb715c40",
"size": "1630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sites/verkkouutiset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "197865"
}
],
"symlink_target": ""
}
|
import hypergan as hg
import pytest
class TestParser:
@pytest.fixture(autouse=True)
def setup(self):
self.parser = hg.parser.Parser()
def parse(self, string):
return self.parser.parse_string(string).to_list()
def test_parses_simple_string(self):
line = "conv2d"
assert self.parse(line) == ["conv2d", [], {}]
def test_parses_args_int(self):
line = "conv2d 256"
assert self.parse(line) == ["conv2d", [256], {}]
def test_parses_args_float(self):
line = "conv2d 256.0"
assert self.parse(line) == ["conv2d", [256.0], {}]
def test_parses_args_str(self):
line = "conv2d cat"
assert self.parse(line) == ["conv2d", ["cat"], {}]
def test_parses_options_int(self):
line = "conv2d cat=2"
assert self.parse(line) == ["conv2d", [], {"cat": 2}]
def test_parses_options_float(self):
line = "conv2d cat=2.0"
assert self.parse(line) == ["conv2d", [], {"cat": 2.0}]
def test_parses_options_bool(self):
line = "conv2d cat=true"
assert self.parse(line) == ["conv2d", [], {"cat": True}]
def test_parses_options_bool_false(self):
line = "conv2d cat=false"
assert self.parse(line) == ["conv2d", [], {"cat": False}]
def test_parses_options_none(self):
line = "conv2d cat=null"
assert self.parse(line) == ["conv2d", [], {"cat": None}]
def test_parses_options_str(self):
line = "conv2d cat2=cat"
assert self.parse(line) == ["conv2d", [], {"cat2": "cat"}]
def test_parses_options_args_str(self):
line = "conv2d cat cat=cat "
assert self.parse(line) == ["conv2d", ["cat"], {"cat": "cat"}]
def test_parses_options_messy_options(self):
line = "conv2d cat2 = cat2 cat1=cat1"
assert self.parse(line) == ["conv2d", [], {"cat2": "cat2", "cat1": "cat1"}]
def test_parses_options_clobber_options(self):
line = "conv2d options=1"
assert self.parse(line) == ["conv2d", [], {"options": 1}]
def test_parses_configurable_param(self):
line = "conv2d (conv2d test)"
obj = self.parse(line)[1][0][0] #TODO why is this nested, should be just [1]
assert obj.to_list() == ["conv2d", ["test"], {}]
def test_parses_configurable_param(self):
line = "conv2d (conv2d test)"
obj = self.parse(line)[1][0][0] #TODO why is this nested, should be just [1]
assert type(obj) == hg.parser.Pattern
def test_parses_configurable_param(self):
line = "conv2d (conv2d test)"
line = "add self (conv filter=1) (conv filter=3) (modulated_conv filter=1)"
obj = self.parse(line)[1][0]
assert obj == "self"
obj = self.parse(line)[1][1][0] #TODO why is this nested, should be just [1]
assert obj.to_list() == ["conv", [], {"filter": 1}]
def test_parses_configurable_param_in_options(self):
line = "conv2d options=(conv2d test)"
assert self.parse(line)[2]["options"][0].to_list() == ["conv2d", ["test"], {}]
def test_parses_size(self):
line = "reshape 64*64*3"
assert self.parse(line) == ["reshape", ["64*64*3"], {}]
def test_parses_multiple_args(self):
line = "concat layer i"
assert self.parse(line) == ["concat", ["layer", "i"], {}]
def test_parses_underscore_in_options(self):
line = "identity a_b=3"
assert self.parse(line) == ["identity", [], {"a_b": 3}]
|
{
"content_hash": "638a62f5d8e0b7037bb64b17506e8537",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 86,
"avg_line_length": 35.25252525252525,
"alnum_prop": 0.566189111747851,
"repo_name": "255BITS/HyperGAN",
"id": "eed4d60848221cdbd879d8b6edab29686fbd50ba",
"size": "3490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "204346"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.gradients."""
import warnings
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
# pylint: disable=unused-import
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import state_grad
# pylint: enable=unused-import
from tensorflow.python.ops.constant_op import constant
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients._GatherInputs(to_ops, reached_ops)
between_ops.sort(lambda x, y: y._id - x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.pack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.pack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat(0, [t4, t3])
t6 = constant([2.0])
t7 = array_ops.concat(0, [t5, t6])
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = array_ops.concat(0, [t3, t3, t3])
t5 = constant([1.0])
t6 = array_ops.concat(0, [t4, t5])
t7 = array_ops.concat(0, [t6, t3])
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(0, 2, wx)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:0", gw.device)
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:1", gw1.device)
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertEquals(None, gw2.device)
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default() as g:
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all([x for x in grads]))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all([x for x in grads]))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=
gradients.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default() as g:
@ops.RegisterGradient("TestOp")
def _TestOpGrad(op, float_grad, string_grad):
"""Gradient function for TestOp."""
self.assertEquals(float_grad.dtype, types.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterShape("TestOp")(None)
c = constant(1.0)
x, y = g.create_op("TestOp", [c], [types.float32, types.string]).outputs
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
class StopGradientTest(test_util.TensorFlowTestCase):
def testStopGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.stop_gradient(inp)
igrad = gradients.gradients(out, inp)[0]
assert igrad is None
class HessianVectorProductTest(test_util.TensorFlowTestCase):
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = hess_v.eval()
self.assertAllClose(hess_v_value, hess_v_actual)
class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
def testIndexedSlicesToTensor(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testInt64Indices(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
c_sparse = ops.IndexedSlices(
c_sparse.values, math_ops.cast(c_sparse.indices, types.int64),
c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testWarnings(self):
# Smaller than the threshold: no warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(types.float32),
array_ops.placeholder(types.int32),
constant([4, 4, 4, 4]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(0, len(w))
# Greater than or equal to the threshold: warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(types.float32),
array_ops.placeholder(types.int32),
constant([100, 100, 100, 100]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"with 100000000 elements. This may consume a large amount of memory."
in str(w[0].message))
# Unknown dense shape: warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(types.float32),
array_ops.placeholder(types.int32),
array_ops.placeholder(types.int32))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"of unknown shape. This may consume a large amount of memory."
in str(w[0].message))
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "1e8f59318a4693fc24d22b81720f3ab5",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 78,
"avg_line_length": 37.27893175074184,
"alnum_prop": 0.6180052535222479,
"repo_name": "aksaxena80/test",
"id": "96af01e1d83d12c616ba3c6731b4faf19834cc42",
"size": "12563",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/gradients_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127080"
},
{
"name": "C++",
"bytes": "4875335"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "631255"
},
{
"name": "Java",
"bytes": "44192"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "44898"
},
{
"name": "Python",
"bytes": "2425565"
},
{
"name": "Shell",
"bytes": "1036"
},
{
"name": "TypeScript",
"bytes": "236089"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
setup(
name='stt-watson',
version='1.0.3',
packages=['utils', 'config', 'recording', 'watson_client', 'watson_client.websocket', 'stt_watson'],
url='https://github.com/HomeHabbit/stt-watson',
license='MIT',
author='Arthur Halet',
author_email='arthurh.halet@gmail.com',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
description='Continuous speech to text using watson in python with websocket and record from microphone',
keywords='text-to-speech watson websocket',
classifiers=['Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License'],
platforms='ALL',
install_requires=[
'autobahn>=0.10.9',
'pyOpenSSL>=0.13.1',
'requests>=2.8.1',
'Twisted>=13.2.0',
'txaio>=2.0.4',
'pyaudio>=0.2.9',
'pyyaml>=3.08',
],
package_data={
'config': ['config.sample.yml']
},
entry_points={
'console_scripts': [
'stt-watson=stt_watson.__main__:main',
],
},
)
|
{
"content_hash": "7309f39530e70687c9b5e35f0778e5d2",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 109,
"avg_line_length": 33.473684210526315,
"alnum_prop": 0.5786163522012578,
"repo_name": "HomeHabbit/stt-watson",
"id": "8d7e970e7163a4fb4d94c49178fe74953aaa7040",
"size": "1272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22297"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from .models import PublicCategories, PrivateCategories, FeedMapper, Feeds, UserSubscriptions, FeedStatus, UserSubscriptions
#Default Serializers
class FeedStatusSerializer(serializers.ModelSerializer):
class Meta:
model = FeedStatus
class UserSubscriptionsSerializer(serializers.ModelSerializer):
class Meta:
model = UserSubscriptions
class PublicCategoriesSerializer(serializers.ModelSerializer):
class Meta:
model = PublicCategories
class PrivateCategoriesSerializer(serializers.ModelSerializer):
class Meta:
model = PrivateCategories
class FeedMapperSerializer(serializers.ModelSerializer):
class Meta:
model = FeedMapper
class FeedsSerializer(serializers.ModelSerializer):
class Meta:
model = Feeds
class UserSubscriptionsSerializer(serializers.ModelSerializer):
class Meta:
model = UserSubscriptions
#Create Serializer
class PrivateCategoriesCreateSerializer(serializers.ModelSerializer):
class Meta:
model = PrivateCategories
fields = ('name', 'description')
#List Serializer
class UserSubscriptionsListSerializer(serializers.ModelSerializer):
feed = serializers.SerializerMethodField('getFeedUrl')
category = serializers.SerializerMethodField('getCategory')
def getFeedUrl(self, obj):
return {'pk':obj.feeds.pk, 'url':obj.feeds.url, 'feedurl':obj.feeds.feed_url }
def getCategory(self, obj):
if obj.category is not None:
return {'pk':obj.category.pk, 'name':obj.category.name, 'description':obj.category.description }
else:
return {'pk':'', 'name':'', 'description':'' }
class Meta:
model = UserSubscriptions
|
{
"content_hash": "d8fe3e65107ebfd5f1f66c5630f73cfd",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 124,
"avg_line_length": 32.592592592592595,
"alnum_prop": 0.7284090909090909,
"repo_name": "parthz/buku",
"id": "eee2c35ea798ec551a4908724fa4571ac36c7ff1",
"size": "1760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/services/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "69734"
},
{
"name": "HTML",
"bytes": "10428"
},
{
"name": "JavaScript",
"bytes": "20429"
},
{
"name": "Python",
"bytes": "40184"
}
],
"symlink_target": ""
}
|
import numpy as np
epsilon = 0.000000001
def build_synapses(layers):
"""
Construct synapses to forward propagate between layers.
These are matrices that will map from layer[n] to layer[n+1]
:param layers: an array describing the size of each layer (number of nodes).
The length of the array indicates the number of hidden layers
Must include the size of the input layer and the output layer, i.e. minimum length == 2
:return: An array of numpy matrices that describe the synapses of a neural network,
initialized to mean centered random values x: x<1 & x>-1
"""
if len(layers) < 2:
raise Warning('need at least two layers')
synapses = []
for l in range(1, len(layers)):
# for the first layer, the input size is the number of features in training data
n = layers[l - 1]
m = layers[l]
# add a random mean-centered synapse of size; 2 * random(0,1) - 1
# the synapse is of height n+1 because n inputs plus 1 for bias/constant
synapses += [2 * np.random.random((n + 1, m)) - 1]
return synapses
def forward_propagate(a1, thetas):
"""
Push input data through the neural network
:param a1: Initial layer of data (raw features)
:param thetas: Set of thetas to act in synapses
:return: Tuple of a and z matrices;
a_(n+1) = sigmoid(z_n)
z_n = a_n * theta_n
"""
a_ = [a(a1)]
z_ = []
# for each synapse, push through in_data and get out_data
for i in range(len(thetas)):
x = a_[i]
theta = thetas[i]
z_.append(z(x, theta))
output = g(z_[-1])
a_.append(a(output))
# the last a value shouldn't have bias term - it's the network output. Truncate the bias column
a_[-1] = a_[-1][:, 1:]
return a_, z_
def a(x):
"""
Compute the a value for x with additional bias input
:param x: the input value for a matrix computation
:return: X with appended column of 1s for bias
"""
# add extra column for bias/constant term
a_ = np.matrix(np.empty((x.shape[0], x.shape[1] + 1)))
a_[:, 0] = 1
a_[:, 1:] = x
return a_
def z(a_, theta):
"""
Compute the z value of a*theta; does not append the bias term
:param a_: The input data to this synapse
:param theta: The theta value for this synapse
:return: a*theta
"""
return a_.dot(theta)
def g(x):
"""
Get the value of the sigmoid function at x
:param x: Matrix or array of real values
:return: The value of the sigmoid function at point(s) x
"""
x[x < -100] = -100
return 1/(1+np.exp(-x))
def g_prime(x):
"""
Get the derivative of the sigmoid function at x
:param x: Scalar, matrix, array etc of real values
:return: The derivative of the sigmoid function at point(s) x
"""
return np.multiply(g(x), (1-g(x)))
def delta(theta, output_error, activation):
"""
Get the error for a hidden layer (no explict labels)
:param theta: The synapse to which this layer is an input
:param output_error: The upstream error, backed out with the synapse
:param activation: The activation for this layer
:return: delta for the hidden layer
"""
# construct the activation matrix with an addition bias term
a_ = a(activation)
slope = np.multiply(a_, 1 - a_)
result = np.multiply(theta.dot(output_error.T), slope.T)
return result
def cost(estimated, y, thetas=[], l=0):
"""
Compute the cost of a neural network vs the expect / label output
:param estimated: The output of the final layer of a neural network - the estimation
:param y: The expected value for output of the neural network - the label
:param thetas: Coefficients used in neural network (regularization)
:param l: lambda value - regularization parameter
:return: Cost of the network
"""
# the number of observations
m = y.shape[0]
# take log of the estimated value so that the 'cost' of predicting 1 is 0, and the 'cost' of predicting zero -> inf
# multiply by y, i.e. when y = 1 we should have estimated 1; an estimate closer to zero is high cost i.e. log(0)
# -y * log(estimated)
# the reverse applies for y = 0; we want 'high cost' when y = 0 and estimation -> 1; so use log(1 - est) -> inf.
# and multiply by 1 - y, i.e. 1 when y == 0
estimated[estimated >= 1] = 1 - epsilon
estimated[estimated <= 0] = epsilon
gap = np.multiply(-y, np.log(estimated)) - np.multiply(1-y, np.log(1-estimated))
j = 1.0 / m * np.sum(gap)
# if thetas are supplied for regularization, return sum of squares
for t in thetas:
# remove the bias / constant term
t_ = t[:, 1:]
j += l/(2.0*m) * np.sum(np.multiply(t_, t_))
return j
def theta_prime(a, z, theta, y, l = 0):
"""
Compute the gradient for the theta terms with respect to the cost
:param a: Array of input / post-sigmoid matrices : a_(n+1) = sigmoid(z_n)
:param z: Array of interim matrices : z_n = a_n * theta_n
:param theta: Array of coefficient matrices
:param y: Expected output, i.e. labels
:param l: lambda value to adjust regularization effect
:return:
"""
m = y.shape[0]
# the last a value is the network output
out = a[-1]
# compute d values
# the first computed d value is the network output - y
d = [out - y]
i = len(theta) - 1
while len(d) < len(theta):
# d value that is currently first is the last layer we 'backpropagated'
d_ = d[0]
t_ = theta[i]
i -= 1
# theta[i] corresponds to z[i-1] - there is no theta[L] and there is no z[0]
z_ = z[i]
# compute the d value for this layer
# transpose the theta term as we are 'reversing' the operation.
new_value = np.multiply((d_ * t_.T[:, 1:]), g_prime(z_))
d.insert(0, new_value)
big_delta = []
theta_p = []
# compute big delta value & theta_prime values
for (ix, d_) in enumerate(d):
# the 'first' d value is d2; multiply it by the first a value a1 - and so on
big_delta.append(a[ix].T * d_)
regularization_term = float(l) / m * theta[ix]
theta_p.append(big_delta[-1] / m + regularization_term)
return theta_p
def softmax(z_):
"""
Softmax is a normalized expontential function; i.e. exp(z_n) / sum(exp(z_k)) for all k.
Gives the relative likelihood / confidence of a prediction.
:param z_: Matrix of predictions
:return:
"""
# construct a k * 1 matrix;
# to 'sum' the values for each row we simply multiply z_ * o (sum/collapse) * o.T (project)
features = z_.shape[1]
o = np.ones((features, 1))
# put a ceiling on these values; taking exp(big_number) causes overflow
z_[z_ > 100] = 100
return np.exp(z_) / (np.exp(z_) * o * o.T)
|
{
"content_hash": "a2356e08d577e1868628104ea7778036",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 119,
"avg_line_length": 32.56872037914692,
"alnum_prop": 0.6158323632130385,
"repo_name": "kirkbroadhurst/simple_neural_network",
"id": "b0c1c29d8eb321842df31ddca81c14aa741316ef",
"size": "6872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplenet/lib/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20629"
}
],
"symlink_target": ""
}
|
"""
Internal module for formatting output data in csv, html,
and latex files. This module also applies to display formatting.
"""
from __future__ import print_function
from distutils.version import LooseVersion
# pylint: disable=W0141
from textwrap import dedent
from pandas.core.dtypes.missing import isnull, notnull
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_float_dtype,
is_period_arraylike,
is_integer_dtype,
is_interval_dtype,
is_datetimetz,
is_integer,
is_float,
is_numeric_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
is_list_like)
from pandas.core.dtypes.generic import ABCSparseArray
from pandas.core.base import PandasObject
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import (StringIO, lzip, range, map, zip, u,
OrderedDict, unichr)
from pandas.io.formats.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
from pandas.io.common import (_get_handle, UnicodeWriter, _expand_user,
_stringify_path)
from pandas.io.formats.printing import adjoin, justify, pprint_thing
from pandas.io.formats.common import get_level_lengths
import pandas.core.common as com
import pandas._libs.lib as lib
from pandas._libs.tslib import (iNaT, Timestamp, Timedelta,
format_array_from_datetime)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
import pandas as pd
import numpy as np
import itertools
import csv
common_docstring = """
Parameters
----------
buf : StringIO-like, optional
buffer to write to
columns : sequence, optional
the subset of columns to write; default None writes all columns
col_space : int, optional
the minimum width of each column
header : bool, optional
%(header)s
index : bool, optional
whether to print index (row) labels, default True
na_rep : string, optional
string representation of NAN to use, default 'NaN'
formatters : list or dict of one-parameter functions, optional
formatter functions to apply to columns' elements by position or name,
default None. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats,
default None. The result of this function must be a unicode string.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every
multiindex key at each row, default True
index_names : bool, optional
Prints the names of the indexes, default True
line_width : int, optional
Width to wrap a line in characters, default no wrap"""
justify_docstring = """
justify : {'left', 'right'}, default None
Left or right-justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box."""
return_docstring = """
Returns
-------
formatted : string (or unicode, depending on data and options)"""
docstring_to_string = common_docstring + justify_docstring + return_docstring
class CategoricalFormatter(object):
def __init__(self, categorical, buf=None, length=True, na_rep='NaN',
footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO(u(""))
self.na_rep = na_rep
self.length = length
self.footer = footer
def _get_footer(self):
footer = ''
if self.length:
if footer:
footer += ', '
footer += "Length: %d" % len(self.categorical)
level_info = self.categorical._repr_categories_info()
# Levels are added in a newline
if footer:
footer += '\n'
footer += level_info
return compat.text_type(footer)
def _get_formatted_values(self):
return format_array(self.categorical.get_values(), None,
float_format=None, na_rep=self.na_rep)
def to_string(self):
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return u('')
fmt_values = self._get_formatted_values()
result = ['%s' % i for i in fmt_values]
result = [i.strip() for i in result]
result = u(', ').join(result)
result = [u('[') + result + u(']')]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return compat.text_type(u('\n').join(result))
class SeriesFormatter(object):
def __init__(self, series, buf=None, length=True, header=True, index=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
max_rows=None):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.index = index
self.max_rows = max_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self.adj = _get_adjustment()
self._chk_truncate()
def _chk_truncate(self):
from pandas.core.reshape.concat import concat
max_rows = self.max_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num],
series.iloc[-row_num:]))
self.tr_row_num = row_num
self.tr_series = series
self.truncate_v = truncate_v
def _get_footer(self):
name = self.series.name
footer = u('')
if getattr(self.series.index, 'freq', None) is not None:
footer += 'Freq: %s' % self.series.index.freqstr
if self.name is not False and name is not None:
if footer:
footer += ', '
series_name = pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
footer += ("Name: %s" % series_name) if name is not None else ""
if (self.length is True or
(self.length == 'truncate' and self.truncate_v)):
if footer:
footer += ', '
footer += 'Length: %d' % len(self.series)
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, 'name', None)
if name:
if footer:
footer += ', '
footer += 'dtype: %s' % pprint_thing(name)
# level infos are added to the end and in a new line, like it is done
# for Categoricals
if is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return compat.text_type(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, MultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
return format_array(self.tr_series._values, None,
float_format=self.float_format, na_rep=self.na_rep)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return 'Series([], ' + footer + ')'
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = self.adj.len(fmt_values[row_num - 1])
if width > 3:
dot_str = '...'
else:
dot_str = '..'
# Series uses mode=center because it has single value columns
# DataFrame uses mode=left
dot_str = self.adj.justify([dot_str], width, mode='center')[0]
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, '')
if self.index:
result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
else:
result = self.adj.adjoin(3, fmt_values).replace('\n ',
'\n').strip()
if self.header and have_header:
result = fmt_index[0] + '\n' + result
if footer:
result += '\n' + footer
return compat.text_type(u('').join(result))
class TextAdjustment(object):
def __init__(self):
self.encoding = get_option("display.encoding")
def len(self, text):
return compat.strlen(text, encoding=self.encoding)
def justify(self, texts, max_len, mode='right'):
return justify(texts, max_len, mode=mode)
def adjoin(self, space, *lists, **kwargs):
return adjoin(space, *lists, strlen=self.len,
justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
def __init__(self):
super(EastAsianTextAdjustment, self).__init__()
if get_option("display.unicode.ambiguous_as_wide"):
self.ambiguous_width = 2
else:
self.ambiguous_width = 1
def len(self, text):
return compat.east_asian_len(text, encoding=self.encoding,
ambiguous_width=self.ambiguous_width)
def justify(self, texts, max_len, mode='right'):
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
if mode == 'left':
return [x.ljust(_get_pad(x)) for x in texts]
elif mode == 'center':
return [x.center(_get_pad(x)) for x in texts]
else:
return [x.rjust(_get_pad(x)) for x in texts]
def _get_adjustment():
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
else:
return TextAdjustment()
class TableFormatter(object):
is_truncated = False
show_dimensions = None
@property
def should_show_dimensions(self):
return (self.show_dimensions is True or
(self.show_dimensions == 'truncate' and self.is_truncated))
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
if is_integer(i):
return self.formatters[i]
else:
return None
else:
if is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ''
__doc__ += common_docstring + justify_docstring + return_docstring
def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
index_names=True, line_width=None, max_rows=None,
max_cols=None, show_dimensions=False, decimal='.', **kwds):
self.frame = frame
if buf is not None:
self.buf = _expand_user(_stringify_path(buf))
else:
self.buf = StringIO()
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
self.formatters = formatters if formatters is not None else {}
self.na_rep = na_rep
self.decimal = decimal
self.col_space = col_space
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame),
len(self.frame))
self.show_dimensions = show_dimensions
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.kwds = kwds
if columns is not None:
self.columns = _ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
self.adj = _get_adjustment()
def _chk_truncate(self):
"""
Checks whether the frame should be truncated. If so, slices
the frame up.
"""
from pandas.core.reshape.concat import concat
# Column of which first element is used to determine width of a dot col
self.tr_size_col = -1
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal
# (why else = 0)
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = (self.header + dot_row + show_dimension_rows +
prompt_row)
# rows available to fill with actual data
max_rows_adj = self.h - n_add_rows
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the
# screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
frame = concat((frame.iloc[:, :col_num],
frame.iloc[:, -col_num:]), axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 0:
row_num = len(frame)
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :],
frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
def _to_str_columns(self):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
# may include levels names also
str_index = self._get_formatted_index(frame)
if not is_list_like(self.header) and not self.header:
stringified = []
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=(self.col_space or 0),
adj=self.adj)
stringified.append(fmt_values)
else:
if is_list_like(self.header):
if len(self.header) != len(self.columns):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(self.columns), len(self.header))))
str_columns = [[label] for label in self.header]
else:
str_columns = self._get_formatted_column_labels(frame)
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
header_colwidth = max(self.col_space or 0,
*(self.adj.len(x) for x in cheader))
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=header_colwidth,
adj=self.adj)
max_len = max(np.max([self.adj.len(x) for x in fmt_values]),
header_colwidth)
cheader = self.adj.justify(cheader, max_len, mode=self.justify)
stringified.append(cheader + fmt_values)
strcols = stringified
if self.index:
strcols.insert(0, str_index)
# Add ... to signal truncated
truncate_h = self.truncate_h
truncate_v = self.truncate_v
if truncate_h:
col_num = self.tr_col_num
# infer from column header
col_width = self.adj.len(strcols[self.tr_size_col][0])
strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] *
(len(str_index)))
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
for ix, col in enumerate(strcols):
# infer from above row
cwidth = self.adj.len(strcols[ix][row_num])
is_dot_col = False
if truncate_h:
is_dot_col = ix == col_num + 1
if cwidth > 3 or is_dot_col:
my_str = '...'
else:
my_str = '..'
if ix == 0:
dot_mode = 'left'
elif is_dot_col:
cwidth = self.adj.len(strcols[self.tr_size_col][0])
dot_mode = 'center'
else:
dot_mode = 'right'
dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s') %
(type(self.frame).__name__,
pprint_thing(frame.columns),
pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print
# the whole frame
text = self.adj.adjoin(1, *strcols)
elif (not isinstance(self.max_cols, int) or
self.max_cols > 0): # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = self.adj.adjoin(1, *strcols).split('\n')
row_lens = Series(text).apply(len)
max_len_col_ix = np.argmax(row_lens)
max_len = row_lens[max_len_col_ix]
headers = [ele[0] for ele in strcols]
# Size of last col determines dot col size. See
# `self._to_str_columns
size_tr_col = len(headers[self.tr_size_col])
max_len += size_tr_col # Need to make space for largest row
# plus truncate dot col
dif = max_len - self.w
adj_dif = dif
col_lens = Series([Series(ele).apply(len).max()
for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
adj_dif -= (col_len + 1) # adjoin adds one
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
max_cols_adj = n_cols - self.index # subtract index column
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
if not self.index:
text = text.replace('\n ', '\n').strip()
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write("\n\n[%d rows x %d columns]" %
(len(frame), len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x)
for x in idx]).max() + adjoin_width
col_widths = [np.array([self.adj.len(x) for x in col]).max() if
len(col) > 0 else 0 for col in strcols]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
if self.index:
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([' \\'] + [' '] * (nrows - 1))
else:
row.append([' '] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
def to_latex(self, column_format=None, longtable=False, encoding=None,
multicolumn=False, multicolumn_format=None, multirow=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
latex_renderer = LatexFormatter(self, column_format=column_format,
longtable=longtable,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if encoding is None:
encoding = 'ascii' if compat.PY2 else 'utf-8'
if hasattr(self.buf, 'write'):
latex_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
import codecs
with codecs.open(self.buf, 'w', encoding=encoding) as f:
latex_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
'method')
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(frame.iloc[:, i]._values, formatter,
float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space, decimal=self.decimal)
def to_html(self, classes=None, notebook=False, border=None):
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
html_renderer = HTMLFormatter(self, classes=classes,
max_rows=self.max_rows,
max_cols=self.max_cols,
notebook=notebook,
border=border)
if hasattr(self.buf, 'write'):
html_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
html_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
' method')
def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
columns = frame.columns
if isinstance(columns, MultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = lzip(*fmt_columns)
dtypes = self.frame.dtypes._values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any([l.is_floating for l in columns.levels])
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if (y not in self.formatters and
need_leadsp[x] and not restrict_formatting):
return ' ' + y
return y
str_columns = list(zip(*[[space_format(x, y) for y in x]
for x in fmt_columns]))
if self.sparsify:
str_columns = _sparsify(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [[' ' + x if not self._get_formatter(i) and
need_leadsp[x] else x]
for i, (col, x) in enumerate(zip(columns,
fmt_columns))]
if self.show_index_names and self.has_index_names:
for x in str_columns:
x.append('')
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by
# to_html().
index = frame.index
columns = frame.columns
show_index_names = self.show_index_names and self.has_index_names
show_col_names = (self.show_index_names and self.has_column_names)
fmt = self._get_formatter('__index__')
if isinstance(index, MultiIndex):
fmt_index = index.format(sparsify=self.sparsify, adjoin=False,
names=show_index_names, formatter=fmt)
else:
fmt_index = [index.format(name=show_index_names, formatter=fmt)]
fmt_index = [tuple(_make_fixed_width(list(x), justify='left',
minimum=(self.col_space or 0),
adj=self.adj)) for x in fmt_index]
adjoined = self.adj.adjoin(1, *fmt_index).split('\n')
# empty space for columns
if show_col_names:
col_header = ['%s' % x for x in self._get_column_name_list()]
else:
col_header = [''] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, MultiIndex):
names.extend('' if name is None else name
for name in columns.names)
else:
names.append('' if columns.name is None else columns.name)
return names
class LatexFormatter(TableFormatter):
""" Used to render a DataFrame to a LaTeX tabular/longtable environment
output.
Parameters
----------
formatter : `DataFrameFormatter`
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns
longtable : boolean, default False
Use a longtable environment instead of tabular.
See also
--------
HTMLFormatter
"""
def __init__(self, formatter, column_format=None, longtable=False,
multicolumn=False, multicolumn_format=None, multirow=False):
self.fmt = formatter
self.frame = self.fmt.frame
self.column_format = column_format
self.longtable = longtable
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
def write_result(self, buf):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s') %
(type(self.frame).__name__, self.frame.columns,
self.frame.index))
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
else:
return 'l'
# reestablish the MultiIndex that has been joined by _to_str_column
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
clevels = self.frame.columns.nlevels
strcols.pop(0)
name = any(self.frame.index.names)
cname = any(self.frame.columns.names)
lastcol = self.frame.index.nlevels - 1
for i, lev in enumerate(self.frame.index.levels):
lev2 = lev.format()
blank = ' ' * len(lev2[0])
# display column names in last index-column
if cname and i == lastcol:
lev3 = [x if x else '{}' for x in self.frame.columns.names]
else:
lev3 = [blank] * clevels
if name:
lev3.append(lev.name)
for level_idx, group in itertools.groupby(
self.frame.index.labels[i]):
count = len(list(group))
lev3.extend([lev2[level_idx]] + [blank] * (count - 1))
strcols.insert(i, lev3)
column_format = self.column_format
if column_format is None:
dtypes = self.frame.dtypes._values
column_format = ''.join(map(get_col_type, dtypes))
if self.fmt.index:
index_format = 'l' * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, not %s'
% type(column_format))
if not self.longtable:
buf.write('\\begin{tabular}{%s}\n' % column_format)
buf.write('\\toprule\n')
else:
buf.write('\\begin{longtable}{%s}\n' % column_format)
buf.write('\\toprule\n')
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
nlevels = clevels
if any(self.frame.index.names):
nlevels += 1
strrows = list(zip(*strcols))
self.clinebuf = []
for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
buf.write('\\midrule\n') # End of header
if self.longtable:
buf.write('\\endhead\n')
buf.write('\\midrule\n')
buf.write('\\multicolumn{3}{r}{{Continued on next '
'page}} \\\\\n')
buf.write('\\midrule\n')
buf.write('\\endfoot\n\n')
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.fmt.kwds.get('escape', True):
# escape backslashes first
crow = [(x.replace('\\', '\\textbackslash').replace('_', '\\_')
.replace('%', '\\%').replace('$', '\\$')
.replace('#', '\\#').replace('{', '\\{')
.replace('}', '\\}').replace('~', '\\textasciitilde')
.replace('^', '\\textasciicircum').replace('&', '\\&')
if x else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
if i < clevels and self.fmt.header and self.multicolumn:
# sum up columns to multicolumns
crow = self._format_multicolumn(crow, ilevels)
if (i >= nlevels and self.fmt.index and self.multirow and
ilevels > 1):
# sum up rows to multirows
crow = self._format_multirow(crow, ilevels, i, strrows)
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
if not self.longtable:
buf.write('\\bottomrule\n')
buf.write('\\end{tabular}\n')
else:
buf.write('\\end{longtable}\n')
def _format_multicolumn(self, row, ilevels):
"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = list(row[:ilevels])
ncol = 1
coltext = ''
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append('\\multicolumn{{{0:d}}}{{{1:s}}}{{{2:s}}}'
.format(ncol, self.multicolumn_format,
coltext.strip()))
# don't modify where not needed
else:
row2.append(coltext)
for c in row[ilevels:]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row, ilevels, i, rows):
"""
Check following rows, whether row should be a multirow
e.g.: becomes:
a & 0 & \multirow{2}{*}{a} & 0 &
& 1 & & 1 &
b & 0 & \cline{1-2}
b & 0 &
"""
for j in range(ilevels):
if row[j].strip():
nrow = 1
for r in rows[i + 1:]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = '\\multirow{{{0:d}}}{{*}}{{{1:s}}}'.format(
nrow, row[j].strip())
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _print_cline(self, buf, i, icol):
"""
Print clines after multirow-blocks are finished
"""
for cl in self.clinebuf:
if cl[0] == i:
buf.write('\cline{{{0:d}-{1:d}}}\n'.format(cl[1], icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
class HTMLFormatter(TableFormatter):
indent_delta = 2
def __init__(self, formatter, classes=None, max_rows=None, max_cols=None,
notebook=False, border=None):
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements = []
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.escape = self.fmt.kwds.get('escape', True)
self.max_rows = max_rows or len(self.fmt.frame)
self.max_cols = max_cols or len(self.fmt.columns)
self.show_dimensions = self.fmt.show_dimensions
self.is_truncated = (self.max_rows < len(self.fmt.frame) or
self.max_cols < len(self.fmt.columns))
self.notebook = notebook
if border is None:
border = get_option('html.border')
self.border = border
def write(self, s, indent=0):
rs = pprint_thing(s)
self.elements.append(' ' * indent + rs)
def write_th(self, s, indent=0, tags=None):
if self.fmt.col_space is not None and self.fmt.col_space > 0:
tags = (tags or "")
tags += 'style="min-width: %s;"' % self.fmt.col_space
return self._write_cell(s, kind='th', indent=indent, tags=tags)
def write_td(self, s, indent=0, tags=None):
return self._write_cell(s, kind='td', indent=indent, tags=tags)
def _write_cell(self, s, kind='td', indent=0, tags=None):
if tags is not None:
start_tag = '<%s %s>' % (kind, tags)
else:
start_tag = '<%s>' % kind
if self.escape:
# escape & first to prevent double escaping of &
esc = OrderedDict([('&', r'&'), ('<', r'<'),
('>', r'>')])
else:
esc = {}
rs = pprint_thing(s, escape_chars=esc).strip()
self.write('%s%s</%s>' % (start_tag, rs, kind), indent)
def write_tr(self, line, indent=0, indent_delta=4, header=False,
align=None, tags=None, nindex_levels=0):
if tags is None:
tags = {}
if align is None:
self.write('<tr>', indent)
else:
self.write('<tr style="text-align: %s;">' % align, indent)
indent += indent_delta
for i, s in enumerate(line):
val_tag = tags.get(i, None)
if header or (self.bold_rows and i < nindex_levels):
self.write_th(s, indent, tags=val_tag)
else:
self.write_td(s, indent, tags=val_tag)
indent -= indent_delta
self.write('</tr>', indent)
def write_style(self):
template = dedent("""\
<style>
.dataframe thead tr:only-child th {
text-align: right;
}
.dataframe thead th {
text-align: left;
}
.dataframe tbody tr th {
vertical-align: top;
}
</style>""")
if self.notebook:
self.write(template)
def write_result(self, buf):
indent = 0
frame = self.frame
_classes = ['dataframe'] # Default class.
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise AssertionError('classes must be list or tuple, '
'not %s' % type(self.classes))
_classes.extend(self.classes)
if self.notebook:
div_style = ''
try:
import IPython
if IPython.__version__ < LooseVersion('3.0.0'):
div_style = ' style="max-width:1500px;overflow:auto;"'
except (ImportError, AttributeError):
pass
self.write('<div{0}>'.format(div_style))
self.write_style()
self.write('<table border="%s" class="%s">' % (self.border,
' '.join(_classes)),
indent)
indent += self.indent_delta
indent = self._write_header(indent)
indent = self._write_body(indent)
self.write('</table>', indent)
if self.should_show_dimensions:
by = chr(215) if compat.PY3 else unichr(215) # ×
self.write(u('<p>%d rows %s %d columns</p>') %
(len(frame), by, len(frame.columns)))
if self.notebook:
self.write('</div>')
_put_lines(buf, self.elements)
def _write_header(self, indent):
truncate_h = self.fmt.truncate_h
row_levels = self.frame.index.nlevels
if not self.fmt.header:
# write nothing
return indent
def _column_header():
if self.fmt.index:
row = [''] * (self.frame.index.nlevels - 1)
else:
row = []
if isinstance(self.columns, MultiIndex):
if self.fmt.has_column_names and self.fmt.index:
row.append(single_column_table(self.columns.names))
else:
row.append('')
style = "text-align: %s;" % self.fmt.justify
row.extend([single_column_table(c, self.fmt.justify, style)
for c in self.columns])
else:
if self.fmt.index:
row.append(self.columns.name or '')
row.extend(self.columns)
return row
self.write('<thead>', indent)
row = []
indent += self.indent_delta
if isinstance(self.columns, MultiIndex):
template = 'colspan="%d" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
else:
sentinel = None
levels = self.columns.format(sparsify=sentinel, adjoin=False,
names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths,
levels)):
if truncate_h:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = (values[:ins_col] + (u('...'),) +
values[ins_col:])
else:
# sparse col headers do not receive a ...
values = (values[:ins_col] +
(values[ins_col - 1], ) +
values[ins_col:])
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers
# get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = (values[:ins_col] + (u('...'),) +
values[ins_col:])
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = (values[:ins_col] + [u('...')] +
values[ins_col:])
name = self.columns.names[lnum]
row = [''] * (row_levels - 1) + ['' if name is None else
pprint_thing(name)]
if row == [""] and self.fmt.index is False:
row = []
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags,
header=True)
else:
col_row = _column_header()
align = self.fmt.justify
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
col_row.insert(ins_col, '...')
self.write_tr(col_row, indent, self.indent_delta, header=True,
align=align)
if all((self.fmt.has_index_names,
self.fmt.index,
self.fmt.show_index_names)):
row = ([x if x is not None else ''
for x in self.frame.index.names] +
[''] * min(len(self.columns), self.max_cols))
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
row.insert(ins_col, '')
self.write_tr(row, indent, self.indent_delta, header=True)
indent -= self.indent_delta
self.write('</thead>', indent)
return indent
def _write_body(self, indent):
self.write('<tbody>', indent)
indent += self.indent_delta
fmt_values = {}
for i in range(min(len(self.columns), self.max_cols)):
fmt_values[i] = self.fmt._format_col(i)
# write values
if self.fmt.index:
if isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent)
else:
self._write_regular_rows(fmt_values, indent)
else:
for i in range(min(len(self.frame), self.max_rows)):
row = [fmt_values[j][i] for j in range(len(self.columns))]
self.write_tr(row, indent, self.indent_delta, tags=None)
indent -= self.indent_delta
self.write('</tbody>', indent)
indent -= self.indent_delta
return indent
def _write_regular_rows(self, fmt_values, indent):
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
ncols = len(self.fmt.tr_frame.columns)
nrows = len(self.fmt.tr_frame)
fmt = self.fmt._get_formatter('__index__')
if fmt is not None:
index_values = self.fmt.tr_frame.index.map(fmt)
else:
index_values = self.fmt.tr_frame.index.format()
row = []
for i in range(nrows):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = ['...' for ele in row]
self.write_tr(str_sep_row, indent, self.indent_delta,
tags=None, nindex_levels=1)
row = []
row.append(index_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
dot_col_ix = self.fmt.tr_col_num + 1
row.insert(dot_col_ix, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=1)
def _write_hierarchical_rows(self, fmt_values, indent):
template = 'rowspan="%d" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
frame = self.fmt.tr_frame
ncols = len(frame.columns)
nrows = len(frame)
row_levels = self.frame.index.nlevels
idx_values = frame.index.format(sparsify=False, adjoin=False,
names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
levels = frame.index.format(sparsify=sentinel, adjoin=False,
names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if truncate_v:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
inserted = False
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
# GH 14882 - Make sure insertion done once
if not inserted:
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = u('...')
idx_values.insert(ins_row, tuple(dot_row))
inserted = True
else:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u('...')
idx_values[ins_row] = tuple(dot_row)
else:
rec_new[tag] = span
# If ins_row lies between tags, all cols idx cols
# receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(ins_row, tuple(
[u('...')] * len(level_lengths)))
# GH 14882 - Place ... in correct level
elif inserted:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u('...')
idx_values[ins_row] = tuple(dot_row)
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, '...')
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels - sparse_offset +
self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=tags,
nindex_levels=len(levels) - sparse_offset)
else:
for i in range(len(frame)):
idx_values = list(zip(*frame.index.format(
sparsify=False, adjoin=False, names=False)))
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels + self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=frame.index.nlevels)
class CSVFormatter(object):
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='',
float_format=None, cols=None, header=True, index=True,
index_label=None, mode='w', nanRep=None, encoding=None,
compression=None, quoting=None, line_terminator='\n',
chunksize=None, tupleize_cols=False, quotechar='"',
date_format=None, doublequote=True, escapechar=None,
decimal='.'):
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf = _expand_user(_stringify_path(path_or_buf))
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.decimal = decimal
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
self.encoding = encoding
self.compression = compression
if quoting is None:
quoting = csv.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csv.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator
self.date_format = date_format
self.tupleize_cols = tupleize_cols
self.has_mi_columns = (isinstance(obj.columns, MultiIndex) and
not self.tupleize_cols)
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the "
"columns")
if cols is not None:
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
# and make sure sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if (isinstance(self.data_index, (DatetimeIndex, PeriodIndex)) and
date_format is not None):
self.data_index = Index([x.strftime(date_format) if notnull(x) else
'' for x in self.data_index])
self.nlevels = getattr(self.data_index, 'nlevels', 1)
if not index:
self.nlevels = 0
def save(self):
# create the writer & save
if hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding is not None:
writer_kwargs['encoding'] = self.encoding
self.writer = UnicodeWriter(f, **writer_kwargs)
else:
self.writer = csv.writer(f, **writer_kwargs)
self._save()
finally:
if close:
f.close()
def _save_header(self):
writer = self.writer
obj = self.obj
index_label = self.index_label
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
if not (has_aliases or self.header):
return
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(cols), len(header))))
else:
write_cols = header
else:
write_cols = cols
if self.index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(obj.index, MultiIndex):
index_label = []
for i, name in enumerate(obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label,
(list, tuple, np.ndarray, Index)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if not has_mi_columns:
encoded_labels += list(write_cols)
writer.writerow(encoded_labels)
else:
# write out the mi
columns = obj.columns
# write out the names for each level, then ALL of the values for
# each level
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(index_label, list) and len(index_label) > 1:
col_line.extend([''] * (len(index_label) - 1))
col_line.extend(columns._get_level_values(i))
writer.writerow(col_line)
# Write out the index line if it's not empty.
# Otherwise, we will print out an extraneous
# blank line between the mi and the data rows.
if encoded_labels and set(encoded_labels) != set(['']):
encoded_labels.extend([''] * len(columns))
writer.writerow(encoded_labels)
def _save(self):
self._save_header()
nrows = len(self.data_index)
# write in chunksize bites
chunksize = self.chunksize
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
for i in range(len(self.blocks)):
b = self.blocks[i]
d = b.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
# ----------------------------------------------------------------------
# Array formatters
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right', decimal='.'):
if is_categorical_dtype(values):
fmt_klass = CategoricalArrayFormatter
elif is_interval_dtype(values):
fmt_klass = IntervalArrayFormatter
elif is_float_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif is_period_arraylike(values):
fmt_klass = PeriodArrayFormatter
elif is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
elif is_datetimetz(values):
fmt_klass = Datetime64TZFormatter
elif is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format, formatter=formatter,
space=space, justify=justify, decimal=decimal)
return fmt_obj.get_result()
class GenericArrayFormatter(object):
def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
space=12, float_format=None, justify='right', decimal='.',
quoting=None, fixed_width=True):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
self.decimal = decimal
self.quoting = quoting
self.fixed_width = fixed_width
def get_result(self):
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
fmt_str = '%% .%dg' % get_option("display.precision")
float_format = lambda x: fmt_str % x
else:
float_format = self.float_format
formatter = (
self.formatter if self.formatter is not None else
(lambda x: pprint_thing(x, escape_chars=('\t', '\r', '\n'))))
def _format(x):
if self.na_rep is not None and lib.checknull(x):
if x is None:
return 'None'
elif x is pd.NaT:
return 'NaT'
return self.na_rep
elif isinstance(x, PandasObject):
return '%s' % x
else:
# object dtype
return '%s' % formatter(x)
vals = self.values
if isinstance(vals, Index):
vals = vals._values
elif isinstance(vals, ABCSparseArray):
vals = vals.values
is_float_type = lib.map_infer(vals, is_float) & notnull(vals)
leading_space = is_float_type.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
fmt_values.append(' %s' % _format(v))
elif is_float_type[i]:
fmt_values.append(float_format(v))
else:
fmt_values.append(' %s' % _format(v))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
"""
"""
def __init__(self, *args, **kwargs):
GenericArrayFormatter.__init__(self, *args, **kwargs)
# float_format is expected to be a string
# formatter should be used to pass a function
if self.float_format is not None and self.formatter is None:
if callable(self.float_format):
self.formatter = self.float_format
self.float_format = None
def _value_formatter(self, float_format=None, threshold=None):
"""Returns a function to be applied on each value to format it
"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
return (float_format % v) if notnull(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notnull(v) else self.na_rep
if self.decimal != '.':
def decimal_formatter(v):
return base_formatter(v).replace('.', self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notnull(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
def get_result_as_array(self):
"""
Returns the float values converted into strings using
the parameters given at initalisation, as a numpy array
"""
if self.formatter is not None:
return np.array([self.formatter(x) for x in self.values])
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# separate the wheat from the chaff
values = self.values
mask = isnull(values)
if hasattr(values, 'to_dense'): # sparse numpy ndarray
values = values.to_dense()
values = np.array(values, dtype='object')
values[mask] = self.na_rep
imask = (~mask).ravel()
values.flat[imask] = np.array([formatter(val)
for val in values.ravel()[imask]])
if self.fixed_width:
return _trim_zeros(values, self.na_rep)
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
if self.float_format is None and self.fixed_width:
float_format = '%% .%df' % self.digits
else:
float_format = self.float_format
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
# we need do convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid='ignore'):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
# large values: more that 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = ((abs_vals < 10**(-self.digits)) &
(abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
float_format = '%% .%de' % self.digits
formatted_values = format_values_with(float_format)
return formatted_values
def _format_strings(self):
# shortcut
if self.formatter is not None:
return [self.formatter(x) for x in self.values]
return list(self.get_result_as_array())
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: '% d' % x)
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
super(Datetime64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self):
""" we by definition have DO NOT have a TZ """
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
if self.formatter is not None and callable(self.formatter):
return [self.formatter(x) for x in values]
fmt_values = format_array_from_datetime(
values.asi8.ravel(),
format=_get_format_datetime64_from_values(values,
self.date_format),
na_rep=self.nat_rep).reshape(values.shape)
return fmt_values.tolist()
class IntervalArrayFormatter(GenericArrayFormatter):
def __init__(self, values, *args, **kwargs):
GenericArrayFormatter.__init__(self, values, *args, **kwargs)
def _format_strings(self):
formatter = self.formatter or str
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
class PeriodArrayFormatter(IntArrayFormatter):
def _format_strings(self):
from pandas.core.indexes.period import IncompatibleFrequency
try:
values = PeriodIndex(self.values).to_native_types()
except IncompatibleFrequency:
# periods may contains different freq
values = Index(self.values, dtype='object').to_native_types()
formatter = self.formatter or (lambda x: '%s' % x)
fmt_values = [formatter(x) for x in values]
return fmt_values
class CategoricalArrayFormatter(GenericArrayFormatter):
def __init__(self, values, *args, **kwargs):
GenericArrayFormatter.__init__(self, values, *args, **kwargs)
def _format_strings(self):
fmt_values = format_array(self.values.get_values(), self.formatter,
float_format=self.float_format,
na_rep=self.na_rep, digits=self.digits,
space=self.space, justify=self.justify)
return fmt_values
def format_percentiles(percentiles):
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
percentiles = np.asarray(percentiles)
# It checks for np.NaN as well
with np.errstate(invalid='ignore'):
if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \
or not np.all(percentiles <= 1):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
int_idx = (percentiles.astype(int) == percentiles)
if np.all(int_idx):
out = percentiles.astype(int).astype(str)
return [i + '%' for i in out]
unique_pcts = np.unique(percentiles)
to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None
# Least precision that keeps percentiles unique after rounding
prec = -np.floor(np.log10(np.min(
np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)
))).astype(int)
prec = max(1, prec)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + '%' for i in out]
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
if even_days:
return True
return False
def _format_datetime64(x, tz=None, nat_rep='NaT'):
if x is None or lib.checknull(x):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
x = Timestamp(x, tz=tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(
x, nat_rep=nat_rep, date_format=date_format)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
def _get_format_datetime64_from_values(values, date_format):
""" given values and a date_format, return a string format """
is_dates_only = _is_dates_only(values)
if is_dates_only:
return date_format or "%Y-%m-%d"
return date_format
class Datetime64TZFormatter(Datetime64Formatter):
def _format_strings(self):
""" we by definition have a TZ """
values = self.values.asobject
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or
_get_format_datetime64(is_dates_only,
date_format=self.date_format))
fmt_values = [formatter(x) for x in values]
return fmt_values
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
super(Timedelta64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self):
formatter = (self.formatter or
_get_format_timedelta64(self.values, nat_rep=self.nat_rep,
box=self.box))
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(
consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = 'even_day'
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{0}'".format(result)
return result
return _formatter
def _make_fixed_width(strings, justify='right', minimum=None, adj=None):
if len(strings) == 0 or justify == 'all':
return strings
if adj is None:
adj = _get_adjustment()
max_len = np.max([adj.len(x) for x in strings])
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
def just(x):
if conf_max is not None:
if (conf_max > 3) & (adj.len(x) > max_len):
x = x[:max_len - 3] + '...'
return x
strings = [just(x) for x in strings]
result = adj.justify(strings, max_len, mode=justify)
return result
def _trim_zeros(str_floats, na_rep='NaN'):
"""
Trims zeros, leaving just one before the decimal points if need be.
"""
trimmed = str_floats
def _cond(values):
non_na = [x for x in values if x != na_rep]
return (len(non_na) > 0 and all([x.endswith('0') for x in non_na]) and
not (any([('e' in x) or ('E' in x) for x in non_na])))
while _cond(trimmed):
trimmed = [x[:-1] if x != na_rep else x for x in trimmed]
# leave one 0 after the decimal points if need be.
return [x + "0" if x.endswith('.') and x != na_rep else x for x in trimmed]
def single_column_table(column, align=None, style=None):
table = '<table'
if align is not None:
table += (' align="%s"' % align)
if style is not None:
table += (' style="%s"' % style)
table += '><tbody>'
for i in column:
table += ('<tr><td>%s</td></tr>' % str(i))
table += '</tbody></table>'
return table
def single_row_table(row): # pragma: no cover
table = '<table><tbody><tr>'
for i in row:
table += ('<td>%s</td>' % str(i))
table += '</tr></tbody></table>'
return table
def _has_names(index):
if isinstance(index, MultiIndex):
return any([x is not None for x in index.names])
else:
return index.name is not None
class EngFormatter(object):
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, accuracy=None, use_eng_prefix=False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
import decimal
import math
dnum = decimal.Decimal(str(num))
if decimal.Decimal.is_nan(dnum):
return 'NaN'
if decimal.Decimal.is_infinite(dnum):
return 'inf'
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = 'E-%02d' % (-int_pow10)
else:
prefix = 'E+%02d' % int_pow10
mant = sign * dnum / (10**pow10)
if self.accuracy is None: # pragma: no cover
format_str = u("% g%s")
else:
format_str = (u("%% .%if%%s") % self.accuracy)
formatted = format_str % (mant, prefix)
return formatted # .strip()
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the floating point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def _put_lines(buf, lines):
if any(isinstance(x, compat.text_type) for x in lines):
lines = [compat.text_type(x) for x in lines]
buf.write('\n'.join(lines))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
|
{
"content_hash": "25a9db107ba33fb45f8a86eb2b868f6f",
"timestamp": "",
"source": "github",
"line_count": 2431,
"max_line_length": 79,
"avg_line_length": 35.59605100781572,
"alnum_prop": 0.5138789377585689,
"repo_name": "linebp/pandas",
"id": "3deaec2dfbbc5bcf7760bc78b5cc6f9071c9cf09",
"size": "86559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/io/formats/format.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6920"
},
{
"name": "C",
"bytes": "492693"
},
{
"name": "C++",
"bytes": "17353"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "11946251"
},
{
"name": "R",
"bytes": "9964"
},
{
"name": "Shell",
"bytes": "22404"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
import os
from .version import _warn_if_setuptools_outdated
from .utils import do
from .discover import find_matching_entrypoint
from . import get_version
def version_keyword(dist, keyword, value):
_warn_if_setuptools_outdated()
if not value:
return
if value is True:
value = {}
if getattr(value, '__call__', None):
value = value()
dist.metadata.version = get_version(**value)
def find_files(path='.'):
if not path:
path = '.'
abs = os.path.abspath(path)
ep = find_matching_entrypoint(abs, 'setuptools_scm.files_command')
if ep:
command = ep.load()
try:
if isinstance(command, str):
return do(ep.load(), path).splitlines()
else:
return command(path)
except Exception:
import traceback
print("File Finder Failed for %s" % ep)
traceback.print_exc()
return []
else:
return []
|
{
"content_hash": "d1924db5fa1bf38c7f8b5c586663d7b2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.5724417426545086,
"repo_name": "HolgerPeters/setuptools_scm",
"id": "1b66a1d703ba6e9e98c640c7678576690ce990fa",
"size": "987",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "setuptools_scm/integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24272"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from webfriend.rpc.reply import Reply
from webfriend.rpc.base import Base
from webfriend.rpc.browser import Browser
from webfriend.rpc.console import Console
from webfriend.rpc.dom import DOM, DOMElement
from webfriend.rpc.emulation import Emulation
from webfriend.rpc.event import Event
from webfriend.rpc.input import Input
from webfriend.rpc.inspector import Inspector
from webfriend.rpc.network import Network
from webfriend.rpc.overlay import Overlay
from webfriend.rpc.page import Page
from webfriend.rpc.runtime import Runtime
from webfriend.rpc.schema import Schema
from webfriend.rpc.target import Target
|
{
"content_hash": "cc14336682df92a67b4de02cb5b71da8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 45,
"avg_line_length": 40.8125,
"alnum_prop": 0.8453292496171516,
"repo_name": "ghetzel/webfriend",
"id": "f121ed9af3a59d0c4f4f969b1c9a6853f102806c",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webfriend/rpc/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "962"
},
{
"name": "Python",
"bytes": "284888"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
"""
This module implements an XRD pattern calculator.
"""
from six.moves import filter
from six.moves import map
from six.moves import zip
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "5/22/14"
from math import sin, cos, asin, pi, degrees, radians
import os
import numpy as np
import json
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
#XRD wavelengths in angstroms
WAVELENGTHS = {
"CuKa": 1.54184,
"CuKa2": 1.54439,
"CuKa1": 1.54056,
"CuKb1": 1.39222,
"MoKa": 0.71073,
"MoKa2": 0.71359,
"MoKa1": 0.70930,
"MoKb1": 0.63229,
"CrKa": 2.29100,
"CrKa2": 2.29361,
"CrKa1": 2.28970,
"CrKb1": 2.08487,
"FeKa": 1.93735,
"FeKa2": 1.93998,
"FeKa1": 1.93604,
"FeKb1": 1.75661,
"CoKa": 1.79026,
"CoKa2": 1.79285,
"CoKa1": 1.78896,
"CoKb1": 1.63079,
"AgKa": 0.560885,
"AgKa2": 0.563813,
"AgKa1": 0.559421,
"AgKb1": 0.497082,
}
with open(os.path.join(os.path.dirname(__file__),
"atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
class XRDCalculator(object):
"""
Computes the XRD pattern of a crystal structure.
This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -
Crystallography of Materials. The formalism for this code is based on
that given in Chapters 11 and 12 of Structure of Materials by Marc De
Graef and Michael E. McHenry. This takes into account the atomic
scattering factors and the Lorentz polarization factor, but not
the Debye-Waller (temperature) factor (for which data is typically not
available). Note that the multiplicity correction is not needed since
this code simply goes through all reciprocal points within the limiting
sphere, which includes all symmetrically equivalent planes. The algorithm
is as follows
1. Calculate reciprocal lattice of structure. Find all reciprocal points
within the limiting sphere given by :math:`\\frac{2}{\\lambda}`.
2. For each reciprocal point :math:`\\mathbf{g_{hkl}}` corresponding to
lattice plane :math:`(hkl)`, compute the Bragg condition
:math:`\\sin(\\theta) = \\frac{\\lambda}{2d_{hkl}}`
3. Compute the structure factor as the sum of the atomic scattering
factors. The atomic scattering factors are given by
.. math::
f(s) = Z - 41.78214 \\times s^2 \\times \\sum\\limits_{i=1}^n a_i \
\exp(-b_is^2)
where :math:`s = \\frac{\\sin(\\theta)}{\\lambda}` and :math:`a_i`
and :math:`b_i` are the fitted parameters for each element. The
structure factor is then given by
.. math::
F_{hkl} = \\sum\\limits_{j=1}^N f_j \\exp(2\\pi i \\mathbf{g_{hkl}}
\cdot \\mathbf{r})
4. The intensity is then given by the modulus square of the structure
factor.
.. math::
I_{hkl} = F_{hkl}F_{hkl}^*
5. Finally, the Lorentz polarization correction factor is applied. This
factor is given by:
.. math::
P(\\theta) = \\frac{1 + \\cos^2(2\\theta)}
{\\sin^2(\\theta)\\cos(\\theta)}
"""
#Tuple of available radiation keywords.
AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())
#Tolerance in which to treat two peaks as having the same two theta.
TWO_THETA_TOL = 1e-5
# Tolerance in which to treat a peak as effectively 0 if the scaled
# intensity is less than this number. Since the max intensity is 100,
# this means the peak must be less than 1e-5 of the peak intensity to be
# considered as zero. This deals with numerical issues where systematic
# absences do not cancel exactly to zero.
SCALED_INTENSITY_TOL = 1e-3
def __init__(self, wavelength="CuKa", symprec=0, debye_waller_factors=None):
"""
Initializes the XRD calculator with a given radiation.
Args:
wavelength (str/float): The wavelength can be specified as either a
float or a string. If it is a string, it must be one of the
supported definitions in the AVAILABLE_RADIATION class
variable, which provides useful commonly used wavelengths.
If it is a float, it is interpreted as a wavelength in
angstroms. Defaults to "CuKa", i.e, Cu K_alpha radiation.
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
"""
if isinstance(wavelength, float):
self.wavelength = wavelength
else:
self.radiation = wavelength
self.wavelength = WAVELENGTHS[wavelength]
self.symprec = symprec
self.debye_waller_factors = debye_waller_factors or {}
def get_xrd_data(self, structure, scaled=True, two_theta_range=(0, 90)):
"""
Calculates the XRD data for a structure.
Args:
structure (Structure): Input structure
scaled (bool): Whether to return scaled intensities. The maximum
peak is set to a value of 100. Defaults to True. Use False if
you need the absolute values to combine XRD plots.
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
Returns:
(XRD pattern) in the form of
[[two_theta, intensity, {(h, k, l): mult}, d_hkl], ...]
Two_theta is in degrees. Intensity is in arbitrary units and if
scaled (the default), has a maximum value of 100 for the highest
peak. {(h, k, l): mult} is a dict of Miller indices for all
diffracted lattice planes contributing to that intensity and
their multiplicities. d_hkl is the interplanar spacing.
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
wavelength = self.wavelength
latt = structure.lattice
is_hex = latt.is_hexagonal()
# Obtained from Bragg condition. Note that reciprocal lattice
# vector length is 1 / d_hkl.
min_r, max_r = (0, 2 / wavelength) if two_theta_range is None else \
[2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
# Obtain crystallographic reciprocal lattice points within range
recip_latt = latt.reciprocal_lattice_crystallographic
recip_pts = recip_latt.get_points_in_sphere(
[[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = filter(lambda d: d[1] >= min_r, recip_pts)
# Create a flattened array of zs, coeffs, fcoords and occus. This is
# used to perform vectorized computation of atomic scattering factors
# later. Note that these are not necessarily the same size as the
# structure as each partially occupied specie occupies its own
# position in the flattened array.
zs = []
coeffs = []
fcoords = []
occus = []
dwfactors = []
for site in structure:
for sp, occu in site.species_and_occu.items():
zs.append(sp.Z)
try:
c = ATOMIC_SCATTERING_PARAMS[sp.symbol]
except KeyError:
raise ValueError("Unable to calculate XRD pattern as "
"there is no scattering coefficients for"
" %s." % sp.symbol)
coeffs.append(c)
dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
fcoords.append(site.frac_coords)
occus.append(occu)
zs = np.array(zs)
coeffs = np.array(coeffs)
fcoords = np.array(fcoords)
occus = np.array(occus)
dwfactors = np.array(dwfactors)
peaks = {}
two_thetas = []
for hkl, g_hkl, ind in sorted(
recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
if g_hkl != 0:
d_hkl = 1 / g_hkl
# Bragg condition
theta = asin(wavelength * g_hkl / 2)
# s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =
# 1/|ghkl|)
s = g_hkl / 2
#Store s^2 since we are using it a few times.
s2 = s ** 2
# Vectorized computation of g.r for all fractional coords and
# hkl.
g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]
# Highly vectorized computation of atomic scattering factors.
# Equivalent non-vectorized code is::
#
# for site in structure:
# el = site.specie
# coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]
# fs = el.Z - 41.78214 * s2 * sum(
# [d[0] * exp(-d[1] * s2) for d in coeff])
fs = zs - 41.78214 * s2 * np.sum(
coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)
dw_correction = np.exp(-dwfactors * s2)
# Structure factor = sum of atomic scattering factors (with
# position factor exp(2j * pi * g.r and occupancies).
# Vectorized computation.
f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r)
* dw_correction)
#Lorentz polarization correction for hkl
lorentz_factor = (1 + cos(2 * theta) ** 2) / \
(sin(theta) ** 2 * cos(theta))
# Intensity for hkl is modulus square of structure factor.
i_hkl = (f_hkl * f_hkl.conjugate()).real
two_theta = degrees(2 * theta)
if is_hex:
#Use Miller-Bravais indices for hexagonal lattices.
hkl = (hkl[0], hkl[1], - hkl[0] - hkl[1], hkl[2])
#Deal with floating point precision issues.
ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) <
XRDCalculator.TWO_THETA_TOL)
if len(ind[0]) > 0:
peaks[two_thetas[ind[0]]][0] += i_hkl * lorentz_factor
peaks[two_thetas[ind[0]]][1].append(tuple(hkl))
else:
peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)],
d_hkl]
two_thetas.append(two_theta)
# Scale intensities so that the max intensity is 100.
max_intensity = max([v[0] for v in peaks.values()])
data = []
for k in sorted(peaks.keys()):
v = peaks[k]
scaled_intensity = v[0] / max_intensity * 100 if scaled else v[0]
fam = get_unique_families(v[1])
if scaled_intensity > XRDCalculator.SCALED_INTENSITY_TOL:
data.append([k, scaled_intensity, fam, v[2]])
return data
def get_xrd_plot(self, structure, two_theta_range=(0, 90),
annotate_peaks=True):
"""
Returns the XRD plot as a matplotlib.pyplot.
Args:
structure: Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks: Whether to annotate the peaks with plane
information.
Returns:
(matplotlib.pyplot)
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(16, 10)
for two_theta, i, hkls, d_hkl in self.get_xrd_data(
structure, two_theta_range=two_theta_range):
if two_theta_range[0] <= two_theta <= two_theta_range[1]:
label = ", ".join([str(hkl) for hkl in hkls.keys()])
plt.plot([two_theta, two_theta], [0, i], color='k',
linewidth=3, label=label)
if annotate_peaks:
plt.annotate(label, xy=[two_theta, i],
xytext=[two_theta, i], fontsize=16)
plt.xlabel(r"$2\theta$ ($^\circ$)")
plt.ylabel("Intensities (scaled)")
plt.tight_layout()
return plt
def show_xrd_plot(self, structure, two_theta_range=(0, 90),
annotate_peaks=True):
"""
Shows the XRD plot.
Args:
structure (Structure): Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks (bool): Whether to annotate the peaks with plane
information.
"""
self.get_xrd_plot(structure, two_theta_range=two_theta_range,
annotate_peaks=annotate_peaks).show()
def get_unique_families(hkls):
"""
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
"""
#TODO: Definitely can be sped up.
def is_perm(hkl1, hkl2):
h1 = map(abs, hkl1)
h2 = map(abs, hkl2)
return all([i == j for i, j in zip(sorted(h1), sorted(h2))])
unique = {}
for hkl1 in hkls:
found = False
for hkl2 in unique.keys():
if is_perm(hkl1, hkl2):
found = True
unique[hkl2] += 1
break
if not found:
unique[hkl1] = 1
return unique
|
{
"content_hash": "4e6a2e653899a5e72f4f6ded21daa313",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 80,
"avg_line_length": 38.39947780678851,
"alnum_prop": 0.5617733052288026,
"repo_name": "ctoher/pymatgen",
"id": "62d5c6f6fe86880a4398b7bc383a179c6d8d850d",
"size": "14724",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/diffraction/xrd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Groff",
"bytes": "868"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3688213"
}
],
"symlink_target": ""
}
|
import pytest
from rclpy.serialization import deserialize_message
from rclpy.serialization import serialize_message
from test_msgs.message_fixtures import get_test_msg
from test_msgs.msg import Arrays
from test_msgs.msg import BasicTypes
from test_msgs.msg import BoundedSequences
from test_msgs.msg import Builtins
from test_msgs.msg import Constants
from test_msgs.msg import Defaults
from test_msgs.msg import Empty
from test_msgs.msg import MultiNested
from test_msgs.msg import Nested
from test_msgs.msg import Strings
from test_msgs.msg import UnboundedSequences
from test_msgs.msg import WStrings
test_msgs = [
(get_test_msg('Arrays'), Arrays),
(get_test_msg('BasicTypes'), BasicTypes),
(get_test_msg('BoundedSequences'), BoundedSequences),
(get_test_msg('Builtins'), Builtins),
(get_test_msg('Constants'), Constants),
(get_test_msg('Defaults'), Defaults),
(get_test_msg('Empty'), Empty),
(get_test_msg('MultiNested'), MultiNested),
(get_test_msg('Nested'), Nested),
(get_test_msg('Strings'), Strings),
(get_test_msg('UnboundedSequences'), UnboundedSequences),
(get_test_msg('WStrings'), WStrings),
]
@pytest.mark.parametrize('msgs,msg_type', test_msgs)
def test_serialize_deserialize(msgs, msg_type):
"""Test message serialization/deserialization."""
for msg in msgs:
msg_serialized = serialize_message(msg)
msg_deserialized = deserialize_message(msg_serialized, msg_type)
assert msg == msg_deserialized
def test_set_float32():
"""Test message serialization/deserialization of float32 type."""
# During (de)serialization we convert to a C float before converting to a PyObject.
# This can result in a loss of precision
msg = BasicTypes()
msg.float32_value = 1.125 # can be represented without rounding
msg_serialized = serialize_message(msg)
msg_deserialized = deserialize_message(msg_serialized, BasicTypes)
assert msg.float32_value == msg_deserialized.float32_value
msg = BasicTypes()
msg.float32_value = 3.14 # can NOT be represented without rounding
msg_serialized = serialize_message(msg)
msg_deserialized = deserialize_message(msg_serialized, BasicTypes)
assert msg.float32_value == round(msg_deserialized.float32_value, 2)
|
{
"content_hash": "89e6ae71c6ceae5638961d795aa97613",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 87,
"avg_line_length": 38.23728813559322,
"alnum_prop": 0.7406914893617021,
"repo_name": "ros2/rclpy",
"id": "f74215657f9662796c652bd155019ab043315fbc",
"size": "2858",
"binary": false,
"copies": "1",
"ref": "refs/heads/rolling",
"path": "rclpy/test/test_serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "C++",
"bytes": "299849"
},
{
"name": "CMake",
"bytes": "7020"
},
{
"name": "Makefile",
"bytes": "584"
},
{
"name": "Python",
"bytes": "796769"
}
],
"symlink_target": ""
}
|
print "Let's practice everything."
print 'You\'d need to know \'about escapes with \\ that do newlines and tab'
poem = """
\t The lovely world
with logic so firmly planted
cannot discern \n the nees of love
nor comprehend passion from intuition
and requires an explannation
\n\t\twhere there is none.
"""
print "---------------"
print poem
print "---------------"
five = 10 - 2 + 3 - 6
print "This should be five: %s" % five
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans,jars,crates
start_point = 10000
beans,jars,crates = secret_formula(start_point)
print "With a starting point of : %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans,jars,crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
|
{
"content_hash": "1b05c4ec147a90b7614e27833a1df67a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 81,
"avg_line_length": 25.805555555555557,
"alnum_prop": 0.6727664155005382,
"repo_name": "AisakaTiger/Learn-Python-The-Hard-Way",
"id": "f022e90bfaf79bc69fd52c8c7b41dfeab8267933",
"size": "929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex24.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "497"
},
{
"name": "Python",
"bytes": "43256"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.costmanagement import CostManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-costmanagement
# USAGE
python private_scheduled_actions_list_filter_by_view_id.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = CostManagementClient(
credential=DefaultAzureCredential(),
)
response = client.scheduled_actions.list()
for item in response:
print(item)
# x-ms-original-file: specification/cost-management/resource-manager/Microsoft.CostManagement/stable/2022-10-01/examples/scheduledActions/scheduledActions-listWithFilter-private.json
if __name__ == "__main__":
main()
|
{
"content_hash": "8d74f41db0cbd12c0f801370d35f7f02",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 182,
"avg_line_length": 35.233333333333334,
"alnum_prop": 0.7540208136234626,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6814e9ba13ae2fb3ede07afad661e1d04ae36511",
"size": "1525",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/costmanagement/azure-mgmt-costmanagement/generated_samples/private_scheduled_actions_list_filter_by_view_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import datetime
import gevent
import os
import re
import urllib2
import urlparse
from gevent import monkey
monkey.patch_all()
from flask import Flask, render_template, send_from_directory, request, make_response
from lxml.html import parse
# initialization
app = Flask(__name__)
#app.config.update(
# DEBUG=True,
#)
# controllers
@app.route('/favicon.ico')
def favicon():
return add_cache_headers(send_from_directory(os.path.join(app.root_path, 'static'), 'ico/favicon.ico'), 30240)
@app.route('/css/<filename>')
def css(filename):
return add_cache_headers(send_from_directory(os.path.join(app.root_path, 'static'), "css/%s" % filename), 30240)
@app.route('/js/<filename>')
def js(filename):
return add_cache_headers(send_from_directory(os.path.join(app.root_path, 'static'), "js/%s" % filename), 30240)
@app.errorhandler(404)
def page_not_found():
return add_cache_headers(render_template('404.html'), 30240), 404
@app.route("/")
def index():
value = request.args.get('slug')
if value is None:
value = ""
return add_cache_headers(render_template('index.html', value=value), 60)
@app.route("/about")
def about():
return add_cache_headers(render_template('about.html'), 30240)
@app.route("/slug/<path:slug>")
def check(slug):
return add_cache_headers(service_check("/%s" % slug), 5)
# Helper functions
def extract_service_domain_from_link(link):
domain = urlparse.urlparse(link).netloc
if re.match('^.*\.service\.gov\.uk$', domain):
return True, domain
else:
return False, "The link is not to something on the service.gov.uk domain"
def find_link_from_slug(govuk_slug):
try:
service_link = None
html = urllib2.urlopen("https://www.gov.uk%s" % govuk_slug)
doc = parse(html).getroot()
for link in doc.cssselect('.get-started a'):
if link.text_content() == 'Start now' or link.text_content() == 'Apply now':
service_link = link.get('href')
if service_link is not None:
return True, service_link
for form in doc.cssselect('form.get-started'):
service_link = form.get('action')
if service_link is not None:
return True, service_link
return False, "Could not find 'Start now' link on https://www.gov.uk%s" % govuk_slug
except IOError:
return False, "https://www.gov.uk%s" % govuk_slug
def header_dict(headers):
dikt = {}
for header in headers:
key, value = header.split(': ', 1)
dikt[key.lower()] = value.rstrip()
return dikt
def format_output(status, title, description):
return render_template('check.html', status=status, title=title, description=description)
def datetime_filter(datetime, format_string='%d/%m/%Y %H:%M'):
return datetime.strftime(format_string)
app.jinja_env.filters['datetime'] = datetime_filter
def add_cache_headers(response, minutes):
response = make_response(response)
then = datetime.datetime.utcnow() + datetime.timedelta(minutes=minutes)
rfc822 = then.strftime("%a, %d %b %Y %H:%M:%S +0000")
response.headers.add('Expires', rfc822)
response.headers.add(
'Cache-Control', 'public,max-age=%d' % int(60 * minutes))
return response
# Service checks
def check_bare_ssl_domain_redirects_to_slug(domain, slug):
correct_location = "https://www.gov.uk%s" % slug
bare_domain = "https://%s/" % domain
url = urllib2.urlopen(bare_domain)
location = url.geturl()
correct_location = "https://www.gov.uk%s" % slug
check_title = "The bare service domain should redirect back to the GOV.UK start page"
check_description = """
In order to make sure that all transactions begin and end on GOV.UK, it is important that
the bare domain (<a href='%s'>%s</a>) redirects back to the GOV.UK start page (<a href='%s'>%s</a>), so that if users are
typing the URL from memory, they get a consistent user experience and their browser does
not cache the wrong entry page.
""" % (bare_domain, bare_domain, correct_location, correct_location)
if location == correct_location:
return True, check_title, check_description
else:
return False, check_title, check_description
def check_listening_on_http(domain):
check_title = "The service should enforce SSL"
check_description = """
Users must have confidence that any information they are submitting to a service, including
pages they visit, is not available to a 3rd-party. In order to enforce this, the service should
either reject non-SSL connections, or should immediately redirect them to secured connection via SSL.
"""
try:
url = urllib2.urlopen("http://%s/" % domain, timeout=1)
parsed_url = urlparse.urlparse(url.geturl())
if parsed_url.scheme == 'https':
return True, "%s (Service redirects HTTP to HTTPS)" % check_title, check_description
else:
return False, check_title, check_description
except IOError:
return True, "%s (Service does not listen on HTTP)" % check_title, check_description
def check_for_HSTS_header(link):
check_title = "The service should set a Strict-Transport-Security (HSTS) header"
check_description = """
To reduce the chance that traffic for a user can be intercepted, the service
should notify the browser that in future it should only use secure connections.
It can do this by setting an HTTP Header called 'Strict-Transport-Security'.
"""
try:
url = urllib2.urlopen(link)
headers = header_dict(url.info().headers)
if 'strict-transport-security' in headers.keys():
return True, check_title, check_description
else:
return False, check_title, check_description
except urllib2.HTTPError as e:
return False, check_title, "Error: %s" % e
def check_for_www(domain):
check_title = "The service domain format should be www.{service}.service.gov.uk"
check_description = """
The Service Manual states that Users must interact with a single domain and that it
will be www.{service}.service.gov.uk. It is permissible to create extra domains for
example for Content Delivery Networks, Assets or Administration, however the user-facing
domain should be prefixed by www.
"""
if re.match('^www\.[^.]+\.service\.gov\.uk$', domain):
return True, check_title, check_description
else:
return False, check_title, check_description
def check_for_robots_txt(domain):
check_title = "The service should have a robots.txt file"
check_description = """
Every service hosted on a service.gov.uk domain must have a robots.txt file asking search engines
not to index any part of the site. More details can be found on the <a href='http://www.robotstxt.org/faq/prevent.html'>Web Robots pages</a>
"""
try:
url = urllib2.urlopen("https://%s/robots.txt" % domain)
headers = header_dict(url.info().headers)
if headers['content-type'].startswith("text/plain"):
return True, check_title, check_description
else:
return False, check_title, "The robots.txt file exists, but is %s rather than text/plain." % headers['content-type']
except urllib2.HTTPError as e:
return False, check_title, "Could not find robots.txt (Error: %s)" % e
def check_cookies(link):
failed = False
check_title = "Cookies should be Secure, HttpOnly and scoped to the service domain"
check_description = """
Cookies used on www.{service}.service.gov.uk must be scoped to the originating domain only.
Cookies must not be scoped to the domain servicename.service.gov.uk. Cookies must be sent with
the <code>Secure</code> attribute and should, where appropriate, be sent with the <code>HttpOnly</code>
attribute. These flags <a href='https://en.wikipedia.org/wiki/HTTP_cookie#Secure_and_HttpOnly'>provide additional assurances
about how cookies will be handled by browsers.</a>
"""
domain = extract_service_domain_from_link(link)[1]
cookie_domain = "domain=" + domain
url = urllib2.urlopen(link)
headers = url.info().headers
for header in headers:
key, value = header.rstrip().split(': ', 1)
if key.lower() == 'set-cookie':
cookie_settings = value.lower().split('; ')
if 'httponly' not in cookie_settings:
check_description += "<br /><br />HttpOnly is not set<br /><"
check_description += " Set-Cookie: %s<br />" % value
failed = True
if 'secure' not in cookie_settings:
check_description += "<br /><br />Secure is not set<br />"
check_description += " Set-Cookie: %s<br />" % value
failed = True
if cookie_domain not in cookie_settings:
check_description += "<br /><br />Cookie not scoped to domain=%s<br />" % domain
check_description += " Set-Cookie: %s<br />" % value
failed = True
if failed:
return False, check_title, check_description
else:
return True, check_title, check_description
# Main logic process
def service_check(slug):
output = ""
result, link = find_link_from_slug(slug)
if result:
output += format_output(result,
"The GOV.UK start page should link to the service",
"""All transactions should start on GOV.UK with a transaction start page.
You supplied the start page of <a href='https://www.gov.uk%s'>https://www.gov.uk%s</a>
which appears to link to a service: <a href='%s'>%s</a>
""" % (slug, slug, link, link))
result, domain = extract_service_domain_from_link(link)
if result:
checks = [
gevent.spawn(check_bare_ssl_domain_redirects_to_slug, domain, slug),
gevent.spawn(check_listening_on_http, domain),
gevent.spawn(check_for_www, domain),
gevent.spawn(check_for_HSTS_header, link),
gevent.spawn(check_for_robots_txt, domain),
gevent.spawn(check_cookies, link)
]
gevent.joinall(checks)
for check in checks:
status, message, description = check.value
output += "%s\n" % format_output(status, message, description)
else:
output += format_output(result,
"The GOV.UK start page should link to service on a service.gov.uk domain",
"""You supplied the start page of <a href='https://www.gov.uk%s'>https://www.gov.uk%s</a>
which appears to have a 'Start now' button, but it does not link to something on the
service.gov.uk domain as it points to <a href='%s'>%s</a>.""" % (slug, slug, link, link))
else:
output += format_output(result,
"The GOV.UK start page should link to the service",
"""All transactions should start on GOV.UK with a transaction start page.
You supplied the start page of <a href='https://www.gov.uk%s'>https://www.gov.uk%s</a>,
but either the page does not exist, or I cannot find a 'Start now' link on this
page pointing to a service.""" % (slug, slug))
return render_template('service_check.html', output=output, link=link, checked_at=datetime.datetime.now())
# launch
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port, debug=True)
|
{
"content_hash": "9c1229c63ba0e431370c7a056eb8df55",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 144,
"avg_line_length": 41.388888888888886,
"alnum_prop": 0.6273489932885906,
"repo_name": "alphagov/service-domain-checker",
"id": "432d98da63a16947601caeb9efab44e304ab7d2d",
"size": "11942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13047"
}
],
"symlink_target": ""
}
|
"""
Unit tests for gerrit.projects.project
"""
from copy import copy
import mock
from gerrit.error import (
UnhandledError,
AlreadyExists,
)
from gerrit.projects.project import Project
from tests import GerritUnitTest
class ProjectTestCase(GerritUnitTest):
"""
Unit tests for handling projects
"""
def setUp(self):
self.project_content = self.build_response(
{
'name': self.PROJECT,
'parent': self.PARENT,
'description': self.DESCRIPTION,
'state': self.STATE,
}
)
self.gerrit_con = mock.Mock()
self.req = mock.Mock()
self.req.status_code = 200
self.req.content = self.build_response({})
self.gerrit_con.call.return_value = self.req
self.req_delete = copy(self.req)
self.req_delete.status_code = 204
def test_create(self):
"""
Test that a project can be created
"""
self.req.status_code = 201
with mock.patch.object(Project, 'get_project') as mock_get_project:
project = Project(self.gerrit_con)
project.create_project(
self.PROJECT,
{'description': self.DESCRIPTION},
)
self.gerrit_con.call.assert_called_with(
request='put',
r_payload={'description': self.DESCRIPTION},
r_endpoint='/a/projects/{}'.format(self.PROJECT),
)
mock_get_project.assert_called_with(self.PROJECT)
def test_create_without_options(self):
"""
Test that a project can be created without options
"""
self.req.status_code = 201
with mock.patch.object(Project, 'get_project') as mock_get_project:
project = Project(self.gerrit_con)
project.create_project(
self.PROJECT,
None,
)
self.gerrit_con.call.assert_called_with(
request='put',
r_payload={},
r_endpoint='/a/projects/{}'.format(self.PROJECT),
)
mock_get_project.assert_called_with(self.PROJECT)
def test_create_exists(self):
"""
Test that it raises if you try to create a project that already exists
"""
self.req.status_code = 409
with self.assertRaises(AlreadyExists):
project = Project(self.gerrit_con)
project.create_project(self.PROJECT, None)
def test_create_unknown_error(self):
"""
Test that it raises if server returns unknown status code
"""
self.req.status_code = 503
with self.assertRaises(UnhandledError):
project = Project(self.gerrit_con)
project.create_project(self.PROJECT, None)
def test_get_returns_project(self):
"""
Test that a project can be fetched
"""
self.req.content = self.project_content
project = Project(self.gerrit_con)
project = project.get_project(self.PROJECT)
self.assertEqual(project.name, self.PROJECT)
self.assertEqual(project.parent, self.PARENT)
self.assertEqual(project.description, self.DESCRIPTION)
self.assertEqual(project.state, self.STATE)
self.assertEqual(project.branches, None)
self.assertEqual(project.web_links, None)
def test_get_raises_on_empty_name(self):
"""
Test that it raises if an empty project name is specified
"""
with self.assertRaises(KeyError):
project = Project(self.gerrit_con)
project.get_project('')
def test_get_raises_on_nonexist(self):
"""
Test that it raises if the project doesn't exist
"""
self.req.status_code = 404
with self.assertRaises(ValueError):
project = Project(self.gerrit_con)
project.get_project(self.PROJECT)
def test_get_raises_on_unknown(self):
"""
Test that it raises if gerrit returns an unknown status code
"""
self.req.status_code = 503
with self.assertRaises(UnhandledError):
project = Project(self.gerrit_con)
project.get_project(self.PROJECT)
def test_delete_success(self):
"""
Test that it is possible to delete a project
"""
self.req.content = self.project_content
self.gerrit_con.call.side_effect = [self.req, self.req_delete]
project = Project(self.gerrit_con)
project = project.get_project(self.PROJECT)
self.assertTrue(project.delete())
self.gerrit_con.call.assert_called_with(
request='delete',
r_endpoint='/a/projects/{}'.format(self.PROJECT),
r_headers={},
r_payload=None,
)
def test_delete_success_options(self):
"""
Test that it is possible to delete a project with options
"""
self.req.content = self.project_content
self.gerrit_con.call.side_effect = [self.req, self.req_delete]
project = Project(self.gerrit_con)
project = project.get_project(self.PROJECT)
self.assertTrue(project.delete({'force': True}))
self.gerrit_con.call.assert_called_with(
request='delete',
r_endpoint='/a/projects/{}'.format(self.PROJECT),
r_headers={},
r_payload={'force': True},
)
def test_delete_fails(self):
"""
Test that failing to delete a project raises
"""
self.req_delete.status_code = 400
self.req.content = self.project_content
self.gerrit_con.call.side_effect = [self.req, self.req_delete]
project = Project(self.gerrit_con)
project = project.get_project(self.PROJECT)
with self.assertRaises(UnhandledError):
project.delete()
def test_project_eq(self):
"""
Test that projects can be compared
"""
self.req.content = self.project_content
project = Project(self.gerrit_con)
project1 = project.get_project(self.PROJECT)
project2 = project.get_project(self.PROJECT)
self.assertEqual(project1, project2)
|
{
"content_hash": "b4647c2d5041dfc228139ef8010bd00e",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 78,
"avg_line_length": 33.16931216931217,
"alnum_prop": 0.5849417769979263,
"repo_name": "propyless/python-gerrit",
"id": "80565e239c3ba0a69a9568e0c94c9945af07d614",
"size": "6269",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit_tests/tests_project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "68075"
},
{
"name": "Shell",
"bytes": "1230"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from setuptools import setup
from setuptools import find_packages
__version__ = '0.6.0'
setup(
author = 'Christian Heitman',
author_email = 'barfframework@gmail.com',
description = 'A multiplatform open source Binary Analysis and Reverse engineering Framework',
download_url = 'https://github.com/programa-stic/barf-project/tarball/v' + __version__,
install_requires = [
'capstone>=3.0.5rc2',
'future',
'networkx',
'pefile',
'pydot',
'pyelftools',
'pygments',
'pyparsing',
],
license = 'BSD 2-Clause',
name = 'barf',
classifiers = [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Security',
'Topic :: Software Development :: Disassemblers',
'Topic :: Software Development :: Interpreters',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = find_packages(exclude=['tests', 'tests.*']),
url = 'http://github.com/programa-stic/barf-project',
entry_points = {
"console_scripts": [
"BARFcfg = barf.tools.cfg.cfg:main",
"BARFcg = barf.tools.cg.cg:main",
"BARFgadgets = barf.tools.gadgets.gadgets:main",
"BARFreplay = barf.tools.replay.replay:main",
]
} ,
version = __version__,
zip_safe = False
)
|
{
"content_hash": "0c55fc9346cde4436809bd34c844bf51",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 103,
"avg_line_length": 35.40816326530612,
"alnum_prop": 0.5579250720461095,
"repo_name": "programa-stic/barf-project",
"id": "f5fee02f1c460d168218dfc9c38bef28efc9ba08",
"size": "1759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "4766"
},
{
"name": "Dockerfile",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "1359"
},
{
"name": "Python",
"bytes": "1105738"
}
],
"symlink_target": ""
}
|
import os
import gym
import numpy as np
import tensorflow as tf
from gym import spaces
from collections import deque
def sample(logits):
noise = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
def cat_entropy(logits):
"""More computationally stable than softmax followed by entropy."""
a0 = logits - tf.reduce_max(logits, 1, keep_dims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, 1, keep_dims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)
def cat_entropy_softmax(p0):
return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)
def mse(pred, target):
return tf.square(pred-target)/2.
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done) # fixed off by one bug
discounted.append(r)
return discounted[::-1]
def find_trainable_variables(key):
with tf.variable_scope(key):
return tf.trainable_variables()
def make_path(f):
return os.makedirs(f, exist_ok=True)
def constant(p):
return 1
def linear(p):
return 1-p
schedules = {
'linear': linear,
'constant': constant
}
class Scheduler(object):
def __init__(self, v, nvalues, schedule):
self.n = 0.
self.v = v
self.nvalues = nvalues
self.schedule = schedules[schedule]
def value(self):
current_value = self.v*self.schedule(self.n/self.nvalues)
self.n += 1.
return current_value
def value_steps(self, steps):
return self.v*self.schedule(steps/self.nvalues)
class EpisodeStats:
def __init__(self, nsteps, nenvs, maxlen=40):
self.episode_rewards = [[] for _ in range(nenvs)]
self.lenbuffer = deque(maxlen=maxlen) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=maxlen) # rolling buffer for episode rewards
self.nsteps = nsteps
self.nenvs = nenvs
def feed(self, rewards, masks):
rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
masks = np.reshape(masks, [self.nenvs, self.nsteps])
for i in range(self.nenvs):
for j in range(self.nsteps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
l = len(self.episode_rewards[i])
s = sum(self.episode_rewards[i])
self.lenbuffer.append(l)
self.rewbuffer.append(s)
self.episode_rewards[i] = []
def mean_length(self):
if self.lenbuffer:
return np.mean(self.lenbuffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
|
{
"content_hash": "e3323c342b46e3e0caaa9071db7bf350",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 83,
"avg_line_length": 29.09,
"alnum_prop": 0.6012375386730835,
"repo_name": "8enmann/model-based-rl",
"id": "cac9985282823c8310da530cd828637cf8588b2f",
"size": "2909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "582429"
},
{
"name": "Python",
"bytes": "48599"
}
],
"symlink_target": ""
}
|
"""
Usage:
pywd --length=<len> [--numbers] [--letters] [--symbols]
[-u | --uppercase]
pywd -v | --version
pywd -h | --help
Examples:
pywd --length=14 --numbers --symbols
pywd --length=10 --symbols
pywd --length=10 --letters --numbers
Options:
-h --help Show this screen.
-v --version Show version.
"""
from docopt import docopt
from pywd import __version__
from string import ascii_lowercase, ascii_uppercase, digits, punctuation
from random import choice, sample
def generate():
version = ".".join(str(x) for x in __version__)
arguments = docopt(__doc__, version=version)
length = int(arguments.get("--length"))
numbers = arguments.get("--numbers")
letters = arguments.get("--letters")
symbols = arguments.get("--symbols")
uppercase = arguments.get("--uppercase")
print(create_password(length, numbers, letters, symbols, uppercase))
def create_password(length, numbers, letters, symbols, uppercase):
password = ""
while len(password) < length:
if numbers and len(password) < length:
password += choice(digits)
if letters and len(password) < length:
password += choice(ascii_lowercase)
if symbols and len(password) < length:
password += choice(punctuation)
if uppercase and len(password) < length:
password += choice(ascii_uppercase)
return ''.join(sample(password, len(password)))
|
{
"content_hash": "17a7b6d1c362ef538bf1244d5b5dddd4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 72,
"avg_line_length": 32.48888888888889,
"alnum_prop": 0.6320109439124487,
"repo_name": "captainsafia/pywd",
"id": "254d2da17bc7a70cc0198c8800421fb63bbb6d07",
"size": "1462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywd/pywd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3191"
}
],
"symlink_target": ""
}
|
"""EVARILOS Central Engine - Calculation of metrics for indoor localization performance benchmarking."""
__author__ = "Filip Lemic"
__copyright__ = "Copyright 2015, EVARILOS Project"
__version__ = "1.0.0"
__maintainer__ = "Filip Lemic"
__email__ = "lemic@tkn.tu-berlin.de"
__status__ = "Development"
import sys
import urllib
import urllib2
import json
import time
import math
import numpy
import datetime
import protobuf_json
from flask import url_for
from datetime import timedelta
import message_evarilos_engine_type1_pb2
import message_evarilos_engine_type2_pb2
import message_evarilos_engine_type1_presentation_pb2
import message_evarilos_engine_type2_presentation_pb2
import experiment_results_pb2
from flask import Flask, request, jsonify
from generateURL import RequestWithMethod
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
try:
h['Access-Control-Allow-Methods'] = get_methods()
except:
h['Access-Control-Allow-Methods'] = 'OPTIONS'
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
#################################################################################################
### Task Listing
#################################################################################################
app = Flask(__name__)
@app.route("/")
@crossdomain(origin='*')
def hello():
response = {'EVARILOS Central Engine': 'This is a prototype of the ECE (EVARILOS Cenatral Engine) service for the EVARILOS project',
'Description of message types': url_for("messages", _external = True)}
return json.dumps(response)
#######################################################################################################
# Task 1: Get the list of all message descriptions
#######################################################################################################
@app.route('/evarilos/ece/v1.0', methods = ['GET'])
@crossdomain(origin='*')
def messages():
message_list = {}
message_list['Message Type 1'] = {}
message_list['Message Type 1']['URL'] = url_for("type1_present", _external = True)
message_list['Message Type 1']['Description'] = "This message is used when one has a set of evaluated locations (at least ground truth + location estimate) and wants to use ECE service to calculate and optionally store metrics."
message_list['Message Type 1']['Usage'] = url_for("figure1", _external = True)
message_list['Message Type 2'] = {}
message_list['Message Type 2']['URL'] = url_for("type3_present", _external = True)
message_list['Message Type 2']['Description'] = "This message is used when one wants to run experiments and calculation of metrics in the real time."
message_list['Message Type 2']['Usage'] = url_for("figure3", _external = True)
return json.dumps(message_list)
@app.route('/evarilos/ece/v1.0/type1/usage')
@crossdomain(origin='*')
def figure1():
with open('figures/type1.jpg', 'rb') as image_file:
def wsgi_app(environ, start_response):
start_response('200 OK', [('Content-type', 'image/jpeg')])
return image_file.read()
return make_response(wsgi_app)
@app.route('/evarilos/ece/v1.0/type2/usage')
@crossdomain(origin='*')
def figure3():
with open('figures/type2.jpg', 'rb') as image_file:
def wsgi_app(environ, start_response):
start_response('200 OK', [('Content-type', 'image/jpeg')])
return image_file.read()
return make_response(wsgi_app)
#################################################################################################
### Type 1 Communication: Calculating and Storing Metrics
#################################################################################################
@app.route("/evarilos/ece/v1.0/calculate_and_store_metrics", methods=['POST'])
@crossdomain(origin='*')
def type1():
try:
experiment = message_evarilos_engine_type1_pb2.ece_type1()
experiment.ParseFromString(request.data)
except:
return json.dumps('Experiment is not well defined!')
experiment_results = experiment_results_pb2.Experiment()
localization_error_2D = {}
localization_error_3D = {}
latency = {}
power_consumption = {}
number_of_points = 0
number_of_good_rooms = {}
for location in experiment.locations:
number_of_points += 1
measurement_location = experiment_results.locations.add()
measurement_location.point_id = location.point_id
try:
measurement_location.localized_node_id = location.localized_node_id
except:
pass
measurement_location.true_coordinate_x = x1 = location.true_coordinate_x
measurement_location.true_coordinate_y = y1 = location.true_coordinate_y
measurement_location.est_coordinate_x = x2 = location.est_coordinate_x
measurement_location.est_coordinate_y = y2 = location.est_coordinate_y
measurement_location.localization_error_2D = math.sqrt(math.pow((x1-x2), 2) + math.pow((y1-y2), 2))
localization_error_2D[number_of_points] = measurement_location.localization_error_2D
try:
measurement_location.true_coordinate_z = z1 = location.true_coordinate_z
measurement_location.est_coordinate_z = z2 = location.est_coordinate_z
measurement_location.localization_error_3D = math.sqrt(math.pow((x1-x2), 2) + math.pow((y1-y2), 2) + math.pow((z1-z2), 2))
localization_error_3D[number_of_points] = measurement_location.localization_error_3D
except:
pass
try:
measurement_location.true_room_label = room1 = location.true_room_label
measurement_location.est_room_label = room2 = location.est_room_label
if room1.strip() == room2.strip():
measurement_location.localization_correct_room = 1
number_of_good_rooms[number_of_points] = 1
else:
measurement_location.localization_correct_room = 0
number_of_good_rooms[number_of_points] = 0
except:
pass
try:
measurement_location.latency = location.latency
latency[number_of_points] = location.latency
except:
pass
try:
measurement_location.power_consumption = location.power_consumption
power_consumption[number_of_points] = location.power_consumption
except:
pass
experiment_results.primary_metrics.accuracy_error_2D_average = float(sum(localization_error_2D.values()))/number_of_points
experiment_results.primary_metrics.accuracy_error_2D_min = min(localization_error_2D.values())
experiment_results.primary_metrics.accuracy_error_2D_max = max(localization_error_2D.values())
experiment_results.primary_metrics.accuracy_error_2D_variance = numpy.var(localization_error_2D.values())
experiment_results.primary_metrics.accuracy_error_2D_median = numpy.median(localization_error_2D.values())
experiment_results.primary_metrics.accuracy_error_2D_75_percentile = numpy.percentile(localization_error_2D.values(), 75)
experiment_results.primary_metrics.accuracy_error_2D_90_percentile = numpy.percentile(localization_error_2D.values(), 90)
experiment_results.primary_metrics.accuracy_error_2D_rms = math.sqrt( (1 / float(number_of_points)) * numpy.sum( numpy.power( localization_error_2D.values(), 2)))
if len(localization_error_3D) != 0:
experiment_results.primary_metrics.accuracy_error_3D_average = float(sum(localization_error_3D.values()))/number_of_points
experiment_results.primary_metrics.accuracy_error_3D_min = min(localization_error_3D.values())
experiment_results.primary_metrics.accuracy_error_3D_max = max(localization_error_3D.values())
experiment_results.primary_metrics.accuracy_error_3D_variance = numpy.var(localization_error_3D.values())
experiment_results.primary_metrics.accuracy_error_3D_median = numpy.median(localization_error_3D.values())
experiment_results.primary_metrics.accuracy_error_3D_75_percentile = numpy.percentile(localization_error_3D.values(), 75)
experiment_results.primary_metrics.accuracy_error_3D_90_percentile = numpy.percentile(localization_error_3D.values(), 90)
experiment_results.primary_metrics.accuracy_error_3D_rms = math.sqrt( (1 / float(number_of_points)) * numpy.sum( numpy.power( localization_error_3D.values(), 2)))
if len(number_of_good_rooms) != 0:
experiment_results.primary_metrics.room_accuracy_error_average = float(sum(number_of_good_rooms.values()))/number_of_points
if len(latency) != 0:
experiment_results.primary_metrics.latency_average = float(sum(latency.values()))/number_of_points
experiment_results.primary_metrics.latency_min = min(latency.values())
experiment_results.primary_metrics.latency_max = max(latency.values())
experiment_results.primary_metrics.latency_variance = numpy.var(latency.values())
experiment_results.primary_metrics.latency_median = numpy.median(latency.values())
experiment_results.primary_metrics.latency_75_percentile = numpy.percentile(latency.values(), 75)
experiment_results.primary_metrics.latency_90_percentile = numpy.percentile(latency.values(), 90)
experiment_results.primary_metrics.latency_rms = math.sqrt( (1 / float(number_of_points)) * numpy.sum( numpy.power( latency.values(), 2)))
if len(power_consumption) != 0:
experiment_results.primary_metrics.power_consumption_average = float(sum(power_consumption.values()))/number_of_points
experiment_results.primary_metrics.power_consumption_median = numpy.median(power_consumption.values())
experiment_results.primary_metrics.power_consumption_min = min(power_consumption.values())
experiment_results.primary_metrics.power_consumption_max = max(power_consumption.values())
experiment_results.primary_metrics.power_consumption_variance = numpy.var(power_consumption.values())
experiment_results.primary_metrics.power_consumption_75_percentile = numpy.percentile(power_consumption.values(), 75)
experiment_results.primary_metrics.power_consumption_90_percentile = numpy.percentile(power_consumption.values(), 90)
experiment_results.primary_metrics.power_consumption_rms = math.sqrt( (1 / float(number_of_points)) * numpy.sum( numpy.power( power_consumption.values(), 2)))
else:
try:
experiment_results.primary_metrics.power_consumption_average = experiment.power_consumption_per_experiment
except:
pass
experiment_results.scenario.testbed_label = experiment.scenario.testbed_label
experiment_results.scenario.testbed_description = experiment.scenario.testbed_description
experiment_results.scenario.experiment_description = experiment.scenario.experiment_description
experiment_results.sut.sut_name = experiment.scenario.sut_description
experiment_results.scenario.receiver_description = experiment.scenario.receiver_description
experiment_results.scenario.sender_description = experiment.scenario.sender_description
experiment_results.scenario.interference_description = experiment.scenario.interference_description
experiment_results.timestamp_utc = experiment.timestamp_utc
experiment_results.experiment_label = experiment.experiment_label
obj = json.dumps(protobuf_json.pb2json(experiment_results))
response = {}
if experiment.store_metrics is True:
apiURL_metrics = experiment.metrics_storage_URI
db_id = experiment.metrics_storage_database
req = urllib2.Request(apiURL_metrics + 'evarilos/metrics/v1.0/database', headers={"Content-Type": "application/json"}, data = db_id)
resp = urllib2.urlopen(req)
coll_id = experiment.metrics_storage_collection
req = RequestWithMethod(apiURL_metrics + 'evarilos/metrics/v1.0/database/' + db_id + '/experiment', 'POST', headers={"Content-Type": "application/json"}, data = coll_id)
resp = urllib2.urlopen(req)
req = urllib2.Request(apiURL_metrics + 'evarilos/metrics/v1.0/database/' + db_id + '/experiment/' + coll_id, headers={"Content-Type": "application/json"}, data = obj)
resp = urllib2.urlopen(req)
response = protobuf_json.pb2json(experiment_results)
return json.dumps(response)
#################################################################################################
### Type 2 Communication: Online calculation
#################################################################################################
@app.route("/evarilos/ece/v1.0/add_one_location", methods=['POST'])
@crossdomain(origin='*')
def type3():
try:
experiment = message_evarilos_engine_type2_pb2.ece_type2()
experiment.ParseFromString(request.data)
except:
return json.dumps('Experiment is not well defined!')
response ={}
if experiment.store_metrics is True:
apiURL_metrics = experiment.metrics_storage_URI
db_id = experiment.metrics_storage_database
req = urllib2.Request(apiURL_metrics + 'evarilos/metrics/v1.0/database', headers={"Content-Type": "application/json"}, data = db_id)
resp = urllib2.urlopen(req)
coll_id = experiment.metrics_storage_collection
req = RequestWithMethod(apiURL_metrics + 'evarilos/metrics/v1.0/database/' + db_id + '/experiment', 'POST', headers={"Content-Type": "application/json"}, data = coll_id)
resp = urllib2.urlopen(req)
try:
req = RequestWithMethod(apiURL_metrics + 'evarilos/metrics/v1.0/database/' + db_id + '/experiment/' + coll_id, 'GET', headers={"Content-Type": "application/x-protobuf"}, data = 'protobuf')
resp = urllib2.urlopen(req)
message = resp.read()
experiment_results = experiment_results_pb2.Experiment()
experiment_results.ParseFromString(message)
except:
experiment_results = experiment_results_pb2.Experiment()
localization_error_2D = {}
localization_error_3D = {}
latency = {}
power_consumption = {}
number_of_points = 0
number_of_good_rooms = {}
for location in experiment_results.locations:
number_of_points += 1
x1 = location.true_coordinate_x
y1 = location.true_coordinate_y
x2 = location.est_coordinate_x
y2 = location.est_coordinate_y
localization_error_2D[number_of_points] = math.sqrt(math.pow((x1-x2), 2) + math.pow((y1-y2), 2))
try:
z1 = location.true_coordinate_z
z2 = location.est_coordinate_z
localization_error_3D[number_of_points] = math.sqrt(math.pow((x1-x2), 2) + math.pow((y1-y2), 2) + math.pow((z1-z2), 2))
except:
pass
try:
room1 = location.true_room_label
room2 = location.est_room_label
if room1.strip() == room2.strip():
number_of_good_rooms[number_of_points] = 1
else:
number_of_good_rooms[number_of_points] = 0
except:
pass
try:
latency[number_of_points] = location.latency
except:
pass
try:
power_consumption[number_of_points] = location.power_consumption
except:
pass
# Get location estimate from the SUT
if experiment.request_estimates is True:
time1 = time.time()
req = urllib2.Request(str(experiment.sut_location_estimate_URI), headers={"Content-Type": "application/json"})
resp = urllib2.urlopen(req)
time2 = time.time()
loc_est_latency = time2 - time1
estimated_location = json.loads(resp.read())
else:
estimated_location = {}
try:
estimated_location['coordinate_x'] = experiment.estimate.est_coordinate_x
estimated_location['coordinate_y'] = experiment.estimate.est_coordinate_y
except:
return json.dumps('Define the location estimate in the message!')
try:
estimated_location['coordinate_z'] = experiment.estimate.est_coordinate_z
except:
pass
try:
estimated_location['room_label'] = experiment.estimate.est_room_label
except:
pass
measurement_location = experiment_results.locations.add()
number_of_points += 1
measurement_location.point_id = experiment.ground_truth.point_id
try:
measurement_location.localized_node_id = experiment.ground_truth.localized_node_id
except:
pass
measurement_location.true_coordinate_x = x1 = experiment.ground_truth.true_coordinate_x
measurement_location.true_coordinate_y = y1 = experiment.ground_truth.true_coordinate_y
measurement_location.est_coordinate_x = x2 = estimated_location['coordinate_x']
measurement_location.est_coordinate_y = y2 = estimated_location['coordinate_y']
measurement_location.localization_error_2D = math.sqrt(math.pow((x1-x2), 2) + math.pow((y1-y2), 2))
localization_error_2D[number_of_points] = measurement_location.localization_error_2D
try:
measurement_location.true_coordinate_z = z1 = experiment.ground_truth.true_coordinate_z
measurement_location.est_coordinate_z = z2 = estimated_location['coordinate_z']
measurement_location.localization_error_3D = math.sqrt(math.pow((x1-x2), 2) + math.pow((y1-y2), 2) + math.pow((z1-z2), 2))
localization_error_3D[number_of_points] = measurement_location.localization_error_3D
except:
time.sleep(0)
try:
measurement_location.true_room_label = room1 = experiment.ground_truth.true_room_label
measurement_location.est_room_label = room2 = estimated_location['room_label']
if room1.strip() == room2.strip():
measurement_location.localization_correct_room = 1
number_of_good_rooms[number_of_points] = 1
else:
measurement_location.localization_correct_room = 0
number_of_good_rooms[number_of_points] = 0
except:
pass
try:
measurement_location.latency = latency[number_of_points] = loc_est_latency
except:
pass
try:
measurement_location.latency = latency[number_of_points] = experiment.estimate.latency
except:
pass
try:
measurement_location.power_consumption = location.power_consumption
power_consumption[number_of_points] = measurement_location.power_consumption
except:
pass
try:
measurement_location.power_consumption = experiment.estimate.power_consumption
power_consumption[number_of_points] = measurement_location.power_consumption
except:
pass
experiment_results.primary_metrics.error_2D_average = float(sum(localization_error_2D.values()))/number_of_points
experiment_results.primary_metrics.error_2D_min = min(localization_error_2D.values())
experiment_results.primary_metrics.error_2D_max = max(localization_error_2D.values())
experiment_results.primary_metrics.error_2D_std = numpy.std(localization_error_2D.values())
experiment_results.primary_metrics.error_2D_median = numpy.median(localization_error_2D.values())
if len(localization_error_3D) != 0:
experiment_results.primary_metrics.error_3D_average = float(sum(localization_error_3D.values()))/number_of_points
experiment_results.primary_metrics.error_3D_min = min(localization_error_3D.values())
experiment_results.primary_metrics.error_3D_max = max(localization_error_3D.values())
experiment_results.primary_metrics.error_3D_std = numpy.std(localization_error_3D.values())
experiment_results.primary_metrics.error_3D_median = numpy.median(localization_error_3D.values())
if len(number_of_good_rooms) != 0:
experiment_results.primary_metrics.room_error_average = float(sum(number_of_good_rooms.values()))/number_of_points
print number_of_good_rooms.values()
print number_of_points
if len(latency) != 0:
experiment_results.primary_metrics.latency_average = float(sum(latency.values()))/number_of_points
experiment_results.primary_metrics.latency_min = min(latency.values())
experiment_results.primary_metrics.latency_max = max(latency.values())
experiment_results.primary_metrics.latency_std = numpy.std(latency.values())
experiment_results.primary_metrics.latency_median = numpy.median(latency.values())
if sum(power_consumption) != 0:
experiment_results.primary_metrics.power_consumption_average = float(sum(power_consumption.values()))/number_of_points
experiment_results.primary_metrics.power_consumption_median = numpy.median(power_consumption.values())
experiment_results.primary_metrics.power_consumption_min = min(power_consumption.values())
experiment_results.primary_metrics.power_consumption_max = max(power_consumption.values())
experiment_results.primary_metrics.power_consumption_std = numpy.std(power_consumption.values())
else:
try:
experiment_results.primary_metrics.power_consumption_average = experiment.power_consumption_per_experiment
except:
pass
experiment_results.scenario.testbed_label = experiment.scenario.testbed_label
experiment_results.scenario.testbed_description = experiment.scenario.testbed_description
experiment_results.scenario.experiment_description = experiment.scenario.experiment_description
experiment_results.sut.sut_name = experiment.scenario.sut_description
experiment_results.scenario.receiver_description = experiment.scenario.receiver_description
experiment_results.scenario.sender_description = experiment.scenario.sender_description
experiment_results.scenario.interference_description = experiment.scenario.interference_description
experiment_results.timestamp_utc = experiment.timestamp_utc
experiment_results.experiment_label = experiment.experiment_label
obj = json.dumps(protobuf_json.pb2json(experiment_results))
if experiment.store_metrics is True:
apiURL_metrics = experiment.metrics_storage_URI
db_id = experiment.metrics_storage_database
req = urllib2.Request(apiURL_metrics + 'evarilos/metrics/v1.0/database', headers={"Content-Type": "application/json"}, data = db_id)
resp = urllib2.urlopen(req)
coll_id = experiment.metrics_storage_collection
req = RequestWithMethod(apiURL_metrics + 'evarilos/metrics/v1.0/database/' + db_id + '/experiment/' + coll_id, 'DELETE', headers={"Content-Type": "application/json"}, data = coll_id)
resp = urllib2.urlopen(req)
req = RequestWithMethod(apiURL_metrics + 'evarilos/metrics/v1.0/database/' + db_id + '/experiment', 'POST', headers={"Content-Type": "application/json"}, data = coll_id)
resp = urllib2.urlopen(req)
req = urllib2.Request(apiURL_metrics + 'evarilos/metrics/v1.0/database/' + db_id + '/experiment/' + coll_id, headers={"Content-Type": "application/json"}, data = obj)
resp = urllib2.urlopen(req)
response = protobuf_json.pb2json(experiment_results)
return json.dumps(response)
##################################################################################################
### Type 1 Communication: Calculating and Storing Metrics
#################################################################################################
@app.route("/evarilos/ece/v1.0/calculate_and_store_metrics", methods=['GET'])
@crossdomain(origin='*')
def type1_present():
experiment = message_evarilos_engine_type1_presentation_pb2.ece_type1()
with open("message_type_1.pb", "rb") as f:
experiment.ParseFromString(f.read())
return json.dumps(protobuf_json.pb2json(experiment))
#################################################################################################
### Type 2 Communication: Online calculation
#################################################################################################
@app.route("/evarilos/ece/v1.0/online_calculation", methods=['GET'])
@crossdomain(origin='*')
def type3_present():
experiment = message_evarilos_engine_type2_presentation_pb2.ece_type2()
with open("message_type_2.pb", "rb") as f:
experiment.ParseFromString(f.read())
return json.dumps(protobuf_json.pb2json(experiment))
#######################################################################################################
# Additional help functions
#######################################################################################################
# Error handler
@app.errorhandler(404)
@crossdomain(origin='*')
def not_found(error):
return make_response(jsonify( { 'error': 'Not found' } ), 404)
# Creating the URIs
def make_public_task(function):
new_function = {}
for field in function:
if field == 'id':
new_function['uri'] = url_for('get_function', function_id = function['id'], _external = True)
else:
new_function[field] = function[field]
return new_function
# Enabling DELETE, PUT, etc.
class RequestWithMethod(urllib2.Request):
"""Workaround for using DELETE with urllib2"""
def __init__(self, url, method, data=None, headers={}, origin_req_host=None, unverifiable=False):
self._method = method
urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
def get_method(self):
if self._method:
return self._method
else:
return urllib2.Request.get_method(self)
if __name__ == "__main__":
app.run(host = '0.0.0.0', port = 5002, debug = 'True')
|
{
"content_hash": "4d640368f47057dd6136cbe026780ed8",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 232,
"avg_line_length": 50.86988847583643,
"alnum_prop": 0.6460830166617948,
"repo_name": "evarilos/ECE-EVARILOS",
"id": "054cd3fff18aaf36b5aa9a6ecf7d6994311d57e0",
"size": "27415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evarilos_central_engine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Protocol Buffer",
"bytes": "12664"
},
{
"name": "PureBasic",
"bytes": "6885"
},
{
"name": "Python",
"bytes": "32495"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import mxnet as mx
import mxnext as X
from mxnext.backbone.resnet_v1 import Builder
bn_count = [10000]
class TridentResNetV1Builder(Builder):
def __init__(self):
super().__init__()
@staticmethod
def bn_shared(data, name, normalizer, branch_ids=None, share_weight=True):
if branch_ids is None:
branch_ids = range(len(data))
gamma = X.var(name + "_gamma")
beta = X.var(name + "_beta")
moving_mean = X.var(name + "_moving_mean")
moving_var = X.var(name + "_moving_var")
bn_layers = []
for i, data_i in zip(branch_ids, data):
if share_weight:
bn_i = normalizer(data=data_i, name=name + "_shared%d" % i,
gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var)
else:
bn_i = normalizer(data=data_i, name=name + "_branch%d" % i)
bn_layers.append(bn_i)
return bn_layers
@staticmethod
def conv_shared(data, name, kernel, num_filter, branch_ids=None, no_bias=True, share_weight=True,
pad=(0, 0), stride=(1, 1), dilate=(1, 1)):
if branch_ids is None:
branch_ids = range(len(data))
weight = X.var(name + '_weight')
if no_bias:
bias = None
else:
bias = X.var(name + '_bias')
conv_layers = []
for i in range(len(data)):
data_i = data[i]
stride_i = stride[i] if type(stride) is list else stride
dilate_i = dilate[i] if type(dilate) is list else dilate
pad_i = pad[i] if type(pad) is list else pad
branch_i = branch_ids[i]
if share_weight:
conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i,
name=name + '_shared%d' % branch_i, no_bias=no_bias, weight=weight, bias=bias)
else:
conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i,
name=name + '_branch%d' % branch_i, no_bias=no_bias)
conv_layers.append(conv_i)
return conv_layers
@staticmethod
def deform_conv_shared(data, name, conv_offset, kernel, num_filter, branch_ids=None, no_bias=True, share_weight=True,
num_deformable_group=4, pad=(0, 0), stride=(1, 1), dilate=(1, 1)):
if branch_ids is None:
branch_ids = range(len(data))
weight = X.var(name + '_weight')
if no_bias:
bias = None
else:
bias = X.var(name + '_bias')
conv_layers = []
for i in range(len(data)):
data_i = data[i]
stride_i = stride[i] if type(stride) is list else stride
dilate_i = dilate[i] if type(dilate) is list else dilate
pad_i = pad[i] if type(pad) is list else pad
conv_offset_i = conv_offset[i] if type(conv_offset) is list else conv_offset
branch_i = branch_ids[i]
if share_weight:
conv_i = mx.contrib.symbol.DeformableConvolution(
data=data_i, offset=conv_offset_i, kernel=kernel, num_filter=num_filter, stride=stride_i, num_deformable_group=4,
dilate=dilate_i, pad=pad_i, no_bias=no_bias, weight=weight, bias=bias, name=name + '_shared%d' % branch_i)
else:
conv_i = mx.contrib.symbol.DeformableConvolution(
data=data_i, offset=conv_offset_i, kernel=kernel, num_filter=num_filter, stride=stride_i, num_deformable_group=4,
dilate=dilate_i, pad=pad_i, no_bias=no_bias, name=name + '_branch%d' % branch_i)
conv_layers.append(conv_i)
return conv_layers
@staticmethod
def stack_branch_symbols(data_list):
data = mx.symbol.stack(*data_list, axis=1)
data = mx.symbol.Reshape(data, (-3, -2))
return data
@classmethod
def resnet_trident_unit(cls, data, name, filter, stride, dilate, proj, norm_type, norm_mom, ndev,
branch_ids, branch_bn_shared, branch_conv_shared, branch_deform=False):
"""
One resnet unit is comprised of 2 or 3 convolutions and a shortcut.
:param data:
:param name:
:param filter:
:param stride:
:param dilate:
:param proj:
:param norm_type:
:param norm_mom:
:param ndev:
:param branch_ids:
:param branch_bn_shared:
:param branch_conv_shared:
:param branch_deform:
:return:
"""
if branch_ids is None:
branch_ids = range(len(data))
norm = X.normalizer_factory(type=norm_type, ndev=ndev, mom=norm_mom)
conv1 = cls.conv_shared(
data, name=name + "_conv1", num_filter=filter // 4, kernel=(1, 1), stride=stride,
branch_ids=branch_ids, share_weight=branch_conv_shared)
bn1 = cls.bn_shared(
conv1, name=name + "_bn1", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared)
relu1 = [X.relu(bn) for bn in bn1]
if not branch_deform:
conv2 = cls.conv_shared(
relu1, name=name + "_conv2", num_filter=filter // 4, kernel=(3, 3),
pad=dilate, dilate=dilate,
branch_ids=branch_ids, share_weight=branch_conv_shared)
else:
conv2_offset = cls.conv_shared(
relu1, name=name + "_conv2_offset", num_filter=72, kernel=(3, 3),
pad=(1, 1), stride=(1, 1), dilate=(1, 1), no_bias=False,
branch_ids=branch_ids, share_weight=branch_conv_shared)
conv2 = cls.deform_conv_shared(
relu1, name=name + "_conv2", conv_offset=conv2_offset, num_filter=filter // 4, kernel=(3, 3),
pad=dilate, dilate=dilate, num_deformable_group=4,
branch_ids=branch_ids, share_weight=branch_conv_shared)
bn2 = cls.bn_shared(
conv2, name=name + "_bn2", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared)
relu2 = [X.relu(bn) for bn in bn2]
conv3 = cls.conv_shared(
relu2, name=name + "_conv3", num_filter=filter, kernel=(1, 1),
branch_ids=branch_ids, share_weight=branch_conv_shared)
bn3 = cls.bn_shared(
conv3, name=name + "_bn3", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared)
if proj:
shortcut = cls.conv_shared(
data, name=name + "_sc", num_filter=filter, kernel=(1, 1),
branch_ids=branch_ids, share_weight=branch_conv_shared)
shortcut = cls.bn_shared(
shortcut, name=name + "_sc_bn", normalizer=norm, branch_ids=branch_ids,
share_weight=branch_bn_shared)
else:
shortcut = data
plus = [X.add(bn3_i, shortcut_i, name=name + "_plus_branch{}".format(i)) \
for i, bn3_i, shortcut_i in zip(branch_ids, bn3, shortcut)]
return [X.relu(p) for p in plus]
@classmethod
def resnet_trident_stage(cls, data, name, num_block, filter, stride, dilate, norm_type, norm_mom, ndev,
num_trident_block, num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform):
"""
One resnet stage is comprised of multiple resnet units. Refer to depth config for more information.
:param data:
:param name:
:param num_block:
:param filter:
:param stride:
:param dilate:
:param norm_type:
:param norm_mom:
:param ndev:
:param num_branch:
:param branch_ids:
:param branch_bn_shared:
:param branch_conv_shared:
:return:
"""
assert isinstance(dilate, list) and len(dilate) == num_branch, 'dilate should be a list with num_branch items.'
num_trident_block = num_trident_block or (num_block - 1) # transform all blocks by default
d = [(d, d) for d in dilate]
data = cls.resnet_unit(data, "{}_unit1".format(name), filter, stride, 1, True, norm_type, norm_mom, ndev)
for i in range(2, num_block + 1):
# [i ... num_block] == [1 ... num_trident_block]
if i == (num_block - num_trident_block + 1):
data = [data] * num_branch
if i >= (num_block - num_trident_block + 1):
if branch_deform and i >= num_block - 2:
unit_deform = True
else:
unit_deform = False
# cast back to fp32 as deformable conv is not optimized for fp16
if unit_deform and i == num_block - 2:
for j in range(num_branch):
data[j] = X.to_fp32(data[j], name="deform_to32")
data = cls.resnet_trident_unit(
data, "{}_unit{}".format(name, i), filter, (1, 1), d, False, norm_type, norm_mom, ndev,
branch_ids, branch_bn_shared, branch_conv_shared, branch_deform=unit_deform)
else:
data = cls.resnet_unit(data, "{}_unit{}".format(name, i), filter, 1, 1, False, norm_type, norm_mom, ndev)
return data
@classmethod
def resnet_trident_c4(cls, data, num_block, stride, dilate, norm_type, norm_mom, ndev, num_trident_block,
num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform):
return cls.resnet_trident_stage(
data, "stage3", num_block, 1024, stride, dilate, norm_type, norm_mom, ndev, num_trident_block,
num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform)
@classmethod
def resnet_factory(cls, depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,
num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
norm_type="local", norm_mom=0.9, ndev=None, fp16=False):
num_c2_unit, num_c3_unit, num_c4_unit, num_c5_unit = cls.depth_config[depth]
data = X.var("data")
if fp16:
data = X.to_fp16(data, "data_fp16")
c1 = cls.resnet_c1(data, use_3x3_conv0, use_bn_preprocess, norm_type, norm_mom, ndev)
c2 = cls.resnet_c2(c1, num_c2_unit, 1, 1, norm_type, norm_mom, ndev)
c3 = cls.resnet_c3(c2, num_c3_unit, 2, 1, norm_type, norm_mom, ndev)
c4 = cls.resnet_trident_c4(c3, num_c4_unit, 2, branch_dilates, norm_type, norm_mom, ndev, num_trident_block,
num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform)
# stack branch features and merge into batch dim
c4 = cls.stack_branch_symbols(c4)
c5 = cls.resnet_c5(c4, num_c5_unit, 1, 2, norm_type, norm_mom, ndev)
return c1, c2, c3, c4, c5
@classmethod
def resnet_c4_factory(cls, depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,
num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
norm_type="local", norm_mom=0.9, ndev=None, fp16=False):
c1, c2, c3, c4, c5 = cls.resnet_factory(depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,
num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
norm_type, norm_mom, ndev, fp16)
return c4
@classmethod
def resnet_c4c5_factory(cls, depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,
num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
norm_type="local", norm_mom=0.9, ndev=None, fp16=False):
c1, c2, c3, c4, c5 = cls.resnet_factory(depth, use_3x3_conv0, use_bn_preprocess,
num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
norm_type, norm_mom, ndev, fp16)
return c4, c5
def get_backbone(self, depth, endpoint, normalizer, fp16,num_trident_block,
num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform):
# parse endpoint
if endpoint == "c4":
factory = self.resnet_c4_factory
elif endpoint == "c4c5":
factory = self.resnet_c4c5_factory
else:
raise KeyError("Unknown backbone endpoint {}".format(endpoint))
return factory(depth, False, False, num_trident_block,
num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,
norm_type=normalizer, fp16=fp16)
|
{
"content_hash": "17ea40d371c2b8412e01239dc018974a",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 140,
"avg_line_length": 46.746428571428574,
"alnum_prop": 0.5619222247688899,
"repo_name": "TuSimple/simpledet",
"id": "2ea5f9e43fc203db5286e68f5220e7bab8b8ced5",
"size": "13089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/tridentnet/resnet_v1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "471944"
},
{
"name": "Cuda",
"bytes": "212680"
},
{
"name": "Makefile",
"bytes": "153"
},
{
"name": "Python",
"bytes": "1567733"
},
{
"name": "Shell",
"bytes": "5501"
}
],
"symlink_target": ""
}
|
from .cli import * # NOQA
from .cli import __all__ # NOQA
from .std import TqdmDeprecationWarning
from warnings import warn
warn("This function will be removed in tqdm==5.0.0\n"
"Please use `tqdm.cli.*` instead of `tqdm._main.*`",
TqdmDeprecationWarning, stacklevel=2)
|
{
"content_hash": "d847d0c0d2a9e517d61e10730ee0dbe1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 57,
"avg_line_length": 40.142857142857146,
"alnum_prop": 0.708185053380783,
"repo_name": "cjayb/mne-python",
"id": "07b6730b1e95f85b56f2708d5ce399c730677470",
"size": "281",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "mne/externals/tqdm/_tqdm/_main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "Makefile",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "7901053"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
"""A kernel that creates a new ASCII file with a given size and name.
"""
__author__ = "ExTASY project <ardita.shkurti@nottingham.ac.uk>"
__copyright__ = "Copyright 2015, http://www.extasy-project.org/"
__license__ = "MIT"
from copy import deepcopy
from radical.ensemblemd.exceptions import ArgumentError
from radical.ensemblemd.exceptions import NoKernelConfigurationError
from radical.ensemblemd.engine import get_engine
from radical.ensemblemd.kernel_plugins.kernel_base import KernelBase
# ------------------------------------------------------------------------------
#
_KERNEL_INFO = {
"name": "custom.mdrun",
"description": "Molecular dynamics with the gromacs software package. http://www.gromacs.org/",
"arguments": {
"--size=":
{
"mandatory": True,
"description": "Number of threads that mdrun should use"
},
"--tpr=":
{
"mandatory": True,
"description": "Input file as a portable binary run file - .tpr - containing the starting structure of the simulation, the molecular topology and all simulation parameters."
},
'--trr=':
{
"mandatory": False,
"description": "Output file"
},
"--edr=":
{
"mandatory": False,
"description": "Output file"
},
"--out=":
{
"mandatory": True,
"description": "Output coordinate file"
}
},
"machine_configs":
{
"*": {
"environment" : {"FOO": "bar"},
"pre_exec" : [],
"executable" : "mdrun",
"uses_mpi" : True
},
"xsede.stampede":
{
"environment" : {},
"pre_exec" : ["module reset","module load intel/15.0.2","module load boost","module load cxx11","module load gromacs"],
"executable" : ["gmx mdrun"],
"uses_mpi" : False
},
"epsrc.archer":
{
"environment" : {},
"pre_exec" : ["module load packages-archer","module load gromacs"],
"executable" : ["gmx mdrun"],
"uses_mpi" : False
},
}
}
# ------------------------------------------------------------------------------
#
class kernel_mdrun(KernelBase):
# --------------------------------------------------------------------------
#
def __init__(self):
"""Le constructor.
"""
super(kernel_mdrun, self).__init__(_KERNEL_INFO)
# --------------------------------------------------------------------------
#
@staticmethod
def get_name():
return _KERNEL_INFO["name"]
# --------------------------------------------------------------------------
#
def _bind_to_resource(self, resource_key):
"""(PRIVATE) Implements parent class method.
"""
if resource_key not in _KERNEL_INFO["machine_configs"]:
if "*" in _KERNEL_INFO["machine_configs"]:
# Fall-back to generic resource key
resource_key = "*"
else:
raise NoKernelConfigurationError(kernel_name=_KERNEL_INFO["name"], resource_key=resource_key)
cfg = _KERNEL_INFO["machine_configs"][resource_key]
arguments = [ '-nt','{0}'.format(self.get_arg("--size=")),
'-s','{0}'.format(self.get_arg('--tpr=')),
'-c','{0}'.format(self.get_arg('--out='))
]
if self.get_arg('--trr=') is not None:
arguments.extend(['-o','{0}'.format(self.get_arg('--trr='))])
if self.get_arg('--edr=') is not None:
arguments.extend(['-e','{0}'.format(self.get_arg('--edr='))])
self._executable = cfg["executable"]
self._arguments = arguments
self._environment = cfg["environment"]
self._uses_mpi = cfg["uses_mpi"]
self._pre_exec = cfg["pre_exec"]
self._post_exec = None
|
{
"content_hash": "5087f5283018908e3b99f305ec4153e2",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 201,
"avg_line_length": 36.16260162601626,
"alnum_prop": 0.4244604316546763,
"repo_name": "radical-cybertools/ExTASY",
"id": "135083325d22d4879dce90b5f540b15c97cf4691",
"size": "4471",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/grlsd-adaptive-on-archer/kernel_defs/mdrun.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1973"
}
],
"symlink_target": ""
}
|
class InterpolationSettings:
section_number: int = 1
interp_per_section: int = 2
interp_method: str = 'equidistant'
same_arc_length_for_all_sections: bool = False
sample_nodes_based_on_arc_length: bool = True
def __init__(self, section_number, interp_per_section, interp_method, same_arc_length_for_all_sections,
sample_nodes_based_on_arc_length):
self.section_number = section_number
self.interp_method = interp_method
self.interp_per_section = interp_per_section
self.same_arc_length_for_all_sections = same_arc_length_for_all_sections
self.sample_nodes_based_on_arc_length = sample_nodes_based_on_arc_length
|
{
"content_hash": "21564c3af0ad85f7e61a6f1b6cbb5a79",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 107,
"avg_line_length": 49.5,
"alnum_prop": 0.6868686868686869,
"repo_name": "schreiberx/sweet",
"id": "bc76644095846ff9dfe621babfc09a5b18d26ba5",
"size": "693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mule_local/python/mule_local/rexi/pcirexi/section/InterpolationSettings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "133036"
},
{
"name": "C++",
"bytes": "2947985"
},
{
"name": "Fortran",
"bytes": "109460"
},
{
"name": "GLSL",
"bytes": "27428"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "2503502"
},
{
"name": "Shell",
"bytes": "490940"
},
{
"name": "TeX",
"bytes": "3093"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import itertools
from collections import defaultdict
from typing import Iterable, Mapping, Sequence, TypeVar
from typing_extensions import Protocol
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import InterpreterConstraintsField, PythonResolveField
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.engine.rules import Get, rule_helper
from pants.engine.target import AllTargets, AllTargetsRequest, FieldSet
from pants.util.ordered_set import OrderedSet
ResolveName = str
class _FieldSetWithResolveAndICs(Protocol):
@property
def resolve(self) -> PythonResolveField:
...
@property
def interpreter_constraints(self) -> InterpreterConstraintsField:
...
_FS = TypeVar("_FS", bound=_FieldSetWithResolveAndICs)
def _partition_by_interpreter_constraints_and_resolve(
field_sets: Sequence[_FS],
python_setup: PythonSetup,
) -> Mapping[tuple[ResolveName, InterpreterConstraints], OrderedSet[_FS]]:
resolve_and_interpreter_constraints_to_field_sets: Mapping[
tuple[str, InterpreterConstraints], OrderedSet[_FS]
] = defaultdict(lambda: OrderedSet())
for field_set in field_sets:
resolve = field_set.resolve.normalized_value(python_setup)
interpreter_constraints = InterpreterConstraints.create_from_compatibility_fields(
[field_set.interpreter_constraints], python_setup
)
resolve_and_interpreter_constraints_to_field_sets[(resolve, interpreter_constraints)].add(
field_set
)
return resolve_and_interpreter_constraints_to_field_sets
@rule_helper
async def _find_all_unique_interpreter_constraints(
python_setup: PythonSetup,
field_set_type: type[FieldSet],
*,
extra_constraints_per_tgt: Iterable[InterpreterConstraintsField] = (),
) -> InterpreterConstraints:
"""Find all unique interpreter constraints used by given field set.
This will find the constraints for each individual matching field set, and then OR across all
unique constraints. Usually, Pants partitions when necessary so that conflicting interpreter
constraints can be handled gracefully. But in some cases, like the `generate-lockfiles` goal,
we need to combine those targets into a single value. This ORs, so that if you have a
==2.7 partition and ==3.6 partition, for example, we return ==2.7 OR ==3.6.
Returns the global interpreter constraints if no relevant targets were matched.
"""
all_tgts = await Get(AllTargets, AllTargetsRequest())
unique_constraints = {
InterpreterConstraints.create_from_compatibility_fields(
[tgt[InterpreterConstraintsField], *extra_constraints_per_tgt], python_setup
)
for tgt in all_tgts
if tgt.has_field(InterpreterConstraintsField) and field_set_type.is_applicable(tgt)
}
if not unique_constraints and extra_constraints_per_tgt:
unique_constraints.add(
InterpreterConstraints.create_from_compatibility_fields(
extra_constraints_per_tgt,
python_setup,
)
)
constraints = InterpreterConstraints(
itertools.chain.from_iterable(ic for ic in unique_constraints if ic)
)
return constraints or InterpreterConstraints(python_setup.interpreter_constraints)
|
{
"content_hash": "7f785dd212ee538b3795fc19d5d88a42",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 98,
"avg_line_length": 39.825581395348834,
"alnum_prop": 0.7322627737226277,
"repo_name": "pantsbuild/pants",
"id": "dab39bb0816a76ea2f02bede78c5531a42c54610",
"size": "3557",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/python/util_rules/partition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Dockerfile",
"bytes": "1132"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "97190"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3771"
},
{
"name": "Python",
"bytes": "7582858"
},
{
"name": "Rust",
"bytes": "1657282"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31400"
},
{
"name": "Starlark",
"bytes": "76892"
}
],
"symlink_target": ""
}
|
import sys
import gevent.monkey
gevent.monkey.patch_all()
import logging
import tempfile
from pprint import pformat
import coverage
import fixtures
import testtools
from testtools import content, content_type
from flexmock import flexmock, Mock
from webtest import TestApp
import contextlib
from vnc_api.vnc_api import *
import cfgm_common.ifmap.client as ifmap_client
import cfgm_common.ifmap.response as ifmap_response
import kombu
import discoveryclient.client as disc_client
import cfgm_common.zkclient
from cfgm_common.uve.vnc_api.ttypes import VncApiConfigLog, VncApiError
from cfgm_common import imid
from test_utils import *
import bottle
bottle.catchall=False
import inspect
import novaclient
import novaclient.client
import gevent.wsgi
import uuid
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
# end lineno
# import from package for non-api server test or directly from file
sys.path.insert(0, '../../../../build/production/api-lib/vnc_api')
sys.path.insert(0, '../../../../distro/openstack/')
sys.path.append('../../../../build/production/config/api-server/vnc_cfg_api_server')
try:
import vnc_cfg_api_server
if not hasattr(vnc_cfg_api_server, 'main'):
from vnc_cfg_api_server import vnc_cfg_api_server
except ImportError:
vnc_cfg_api_server = 'vnc_cfg_api_server could not be imported'
try:
import to_bgp
except ImportError:
try:
from schema_transformer import to_bgp
except ImportError:
to_bgp = 'to_bgp could not be imported'
try:
import svc_monitor
if not hasattr(svc_monitor, 'main'):
from svc_monitor import svc_monitor
except ImportError:
svc_monitor = 'svc_monitor could not be imported'
try:
import device_manager
if not hasattr(device_manager, 'main'):
from device_manager import device_manager
except ImportError:
device_manager = 'device_manager could not be imported'
def generate_conf_file_contents(conf_sections):
cfg_parser = ConfigParser.RawConfigParser()
for (section, var, val) in conf_sections:
try:
cfg_parser.add_section(section)
except ConfigParser.DuplicateSectionError:
pass
if not var:
continue
if val == '':
cfg_parser.set(section, var, 'empty')
else:
cfg_parser.set(section, var, val)
return cfg_parser
# end generate_conf_file_contents
def generate_logconf_file_contents():
cfg_parser = ConfigParser.RawConfigParser()
cfg_parser.add_section('formatters')
cfg_parser.add_section('formatter_simple')
cfg_parser.set('formatters', 'keys', 'simple')
cfg_parser.set('formatter_simple', 'format', '%(name)s:%(levelname)s: %(message)s')
cfg_parser.add_section('handlers')
cfg_parser.add_section('handler_console')
cfg_parser.add_section('handler_api_server_file')
cfg_parser.set('handlers', 'keys', 'console,api_server_file')
cfg_parser.set('handler_console', 'class', 'StreamHandler')
cfg_parser.set('handler_console', 'level', 'WARN')
cfg_parser.set('handler_console', 'args', '[]')
cfg_parser.set('handler_console', 'formatter', 'simple')
cfg_parser.set('handler_api_server_file', 'class', 'FileHandler')
cfg_parser.set('handler_api_server_file', 'level', 'INFO')
cfg_parser.set('handler_api_server_file', 'formatter', 'simple')
cfg_parser.set('handler_api_server_file', 'args', "('api_server.log',)")
cfg_parser.add_section('loggers')
cfg_parser.add_section('logger_root')
cfg_parser.add_section('logger_FakeWSGIHandler')
cfg_parser.set('loggers', 'keys', 'root,FakeWSGIHandler')
cfg_parser.set('logger_root', 'level', 'WARN')
cfg_parser.set('logger_root', 'handlers', 'console')
cfg_parser.set('logger_FakeWSGIHandler', 'level', 'INFO')
cfg_parser.set('logger_FakeWSGIHandler', 'qualname', 'FakeWSGIHandler')
cfg_parser.set('logger_FakeWSGIHandler', 'handlers', 'api_server_file')
return cfg_parser
# end generate_logconf_file_contents
def launch_api_server(listen_ip, listen_port, http_server_port, admin_port,
conf_sections):
args_str = ""
args_str = args_str + "--listen_ip_addr %s " % (listen_ip)
args_str = args_str + "--listen_port %s " % (listen_port)
args_str = args_str + "--http_server_port %s " % (http_server_port)
args_str = args_str + "--admin_port %s " % (admin_port)
args_str = args_str + "--cassandra_server_list 0.0.0.0:9160 "
args_str = args_str + "--log_local "
args_str = args_str + "--log_file api_server_sandesh.log "
import cgitb
cgitb.enable(format='text')
with tempfile.NamedTemporaryFile() as conf, tempfile.NamedTemporaryFile() as logconf:
cfg_parser = generate_conf_file_contents(conf_sections)
cfg_parser.write(conf)
conf.flush()
cfg_parser = generate_logconf_file_contents()
cfg_parser.write(logconf)
logconf.flush()
args_str = args_str + "--conf_file %s " %(conf.name)
args_str = args_str + "--logging_conf %s " %(logconf.name)
vnc_cfg_api_server.main(args_str)
#end launch_api_server
def launch_svc_monitor(api_server_ip, api_server_port):
args_str = ""
args_str = args_str + "--api_server_ip %s " % (api_server_ip)
args_str = args_str + "--api_server_port %s " % (api_server_port)
args_str = args_str + "--http_server_port %s " % (get_free_port())
args_str = args_str + "--ifmap_username api-server "
args_str = args_str + "--ifmap_password api-server "
args_str = args_str + "--cassandra_server_list 0.0.0.0:9160 "
args_str = args_str + "--log_local "
args_str = args_str + "--log_file svc_monitor.log "
svc_monitor.main(args_str)
# end launch_svc_monitor
def kill_svc_monitor(glet):
glet.kill()
svc_monitor.SvcMonitor.reset()
def kill_schema_transformer(glet):
glet.kill()
to_bgp.transformer.ssrc_task.kill()
to_bgp.transformer.arc_task.kill()
to_bgp.transformer.reset()
def launch_schema_transformer(api_server_ip, api_server_port):
args_str = ""
args_str = args_str + "--api_server_ip %s " % (api_server_ip)
args_str = args_str + "--api_server_port %s " % (api_server_port)
args_str = args_str + "--http_server_port %s " % (get_free_port())
args_str = args_str + "--cassandra_server_list 0.0.0.0:9160 "
args_str = args_str + "--log_local "
args_str = args_str + "--log_file schema_transformer.log "
args_str = args_str + "--trace_file schema_transformer.err "
to_bgp.main(args_str)
# end launch_schema_transformer
def launch_device_manager(api_server_ip, api_server_port):
args_str = ""
args_str = args_str + "--api_server_ip %s " % (api_server_ip)
args_str = args_str + "--api_server_port %s " % (api_server_port)
args_str = args_str + "--http_server_port %s " % (get_free_port())
args_str = args_str + "--cassandra_server_list 0.0.0.0:9160 "
args_str = args_str + "--log_local "
args_str = args_str + "--log_file device_manager.log "
device_manager.main(args_str)
# end launch_device_manager
def setup_extra_flexmock(mocks):
for (cls, method_name, val) in mocks:
kwargs = {method_name: val}
flexmock(cls, **kwargs)
# end setup_extra_flexmock
def setup_common_flexmock():
flexmock(cfgm_common.vnc_cpu_info.CpuInfo, __init__=stub)
flexmock(novaclient.client, Client=FakeNovaClient.initialize)
flexmock(ifmap_client.client, __init__=FakeIfmapClient.initialize,
call=FakeIfmapClient.call,
call_async_result=FakeIfmapClient.call_async_result)
flexmock(pycassa.system_manager.Connection, __init__=stub)
flexmock(pycassa.system_manager.SystemManager, create_keyspace=stub,
create_column_family=stub)
flexmock(pycassa.ConnectionPool, __init__=stub)
flexmock(pycassa.ColumnFamily, __new__=FakeCF)
flexmock(pycassa.util, convert_uuid_to_time=Fake_uuid_to_time)
flexmock(disc_client.DiscoveryClient, __init__=stub)
flexmock(disc_client.DiscoveryClient, publish_obj=stub)
flexmock(disc_client.DiscoveryClient, publish=stub)
flexmock(disc_client.DiscoveryClient, subscribe=stub)
flexmock(disc_client.DiscoveryClient, syslog=stub)
flexmock(disc_client.DiscoveryClient, def_pub=stub)
flexmock(kazoo.client.KazooClient, __new__=FakeKazooClient)
flexmock(kazoo.handlers.gevent.SequentialGeventHandler, __init__=stub)
flexmock(kombu.Connection, __new__=FakeKombu.Connection)
flexmock(kombu.Exchange, __new__=FakeKombu.Exchange)
flexmock(kombu.Queue, __new__=FakeKombu.Queue)
flexmock(kombu.Consumer, __new__=FakeKombu.Consumer)
flexmock(kombu.Producer, __new__=FakeKombu.Producer)
flexmock(VncApiConfigLog, __new__=FakeApiConfigLog)
#end setup_common_flexmock
@contextlib.contextmanager
def patch(target_obj, target_method_name, patched):
orig_method = getattr(target_obj, target_method_name)
def patched_wrapper(*args, **kwargs):
return patched(orig_method, *args, **kwargs)
setattr(target_obj, target_method_name, patched_wrapper)
try:
yield
finally:
setattr(target_obj, target_method_name, orig_method)
#end patch
cov_handle = None
class TestCase(testtools.TestCase, fixtures.TestWithFixtures):
_HTTP_HEADERS = {
'Content-type': 'application/json; charset="UTF-8"',
}
def __init__(self, *args, **kwargs):
self._logger = logging.getLogger(__name__)
self._assert_till_max_tries = 30
self._config_knobs = [
('DEFAULTS', '', ''),
]
super(TestCase, self).__init__(*args, **kwargs)
self.addOnException(self._add_detailed_traceback)
def _add_detailed_traceback(self, exc_info):
import cgitb
cgitb.enable(format='text')
from cStringIO import StringIO
tmp_file = StringIO()
cgitb.Hook(format="text", file=tmp_file).handle(exc_info)
tb_str = tmp_file.getvalue()
tmp_file.close()
self.addDetail('detailed-traceback', content.text_content(tb_str))
def _add_detail(self, detail_str):
frame = inspect.stack()[1]
self.addDetail('%s:%s ' %(frame[1],frame[2]), content.text_content(detail_str))
def _add_request_detail(self, op, url, headers=None, query_params=None,
body=None):
request_str = ' URL: ' + pformat(url) + \
' OPER: ' + pformat(op) + \
' Headers: ' + pformat(headers) + \
' Query Params: ' + pformat(query_params) + \
' Body: ' + pformat(body)
self._add_detail('Requesting: ' + request_str)
def _http_get(self, uri, query_params=None):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('GET', url, headers=self._HTTP_HEADERS,
query_params=query_params)
response = self._api_server_session.get(url, headers=self._HTTP_HEADERS,
params=query_params)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_get
def _http_post(self, uri, body):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('POST', url, headers=self._HTTP_HEADERS, body=body)
response = self._api_server_session.post(url, data=body,
headers=self._HTTP_HEADERS)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_post
def _http_delete(self, uri, body):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('DELETE', url, headers=self._HTTP_HEADERS, body=body)
response = self._api_server_session.delete(url, data=body,
headers=self._HTTP_HEADERS)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_delete
def _http_put(self, uri, body):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('PUT', url, headers=self._HTTP_HEADERS, body=body)
response = self._api_server_session.put(url, data=body,
headers=self._HTTP_HEADERS)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_put
def _create_test_objects(self, count=1):
ret_objs = []
for i in range(count):
obj_name = self.id() + '-vn-' + str(i)
obj = VirtualNetwork(obj_name)
self._add_detail('creating-object ' + obj_name)
self._vnc_lib.virtual_network_create(obj)
ret_objs.append(obj)
return ret_objs
def _create_test_object(self):
return self._create_test_objects()[0]
def ifmap_has_ident(self, obj=None, id=None):
if obj:
_type = obj.get_type()
_fq_name = obj.get_fq_name()
if id:
_type = self._vnc_lib.id_to_fq_name_type(id)
_fq_name = self._vnc_lib.id_to_fq_name(id)
ifmap_id = imid.get_ifmap_id_from_fq_name(_type, _fq_name)
if ifmap_id in FakeIfmapClient._graph:
return True
return False
def assertTill(self, expr_or_cb, *cb_args, **cb_kwargs):
tries = 0
while True:
if callable(expr_or_cb):
ret = expr_or_cb(*cb_args, **cb_kwargs)
else:
ret = eval(expr_or_cb)
if ret:
break
tries = tries + 1
if tries >= self._assert_till_max_tries:
raise Exception('Max retries')
self._logger.warn('Retrying at ' + str(inspect.stack()[1]))
gevent.sleep(2)
def setUp(self):
super(TestCase, self).setUp()
global cov_handle
if not cov_handle:
cov_handle = coverage.coverage(source=['./'], omit=['.venv/*'])
#cov_handle.start()
cfgm_common.zkclient.LOG_DIR = './'
gevent.wsgi.WSGIServer.handler_class = FakeWSGIHandler
setup_common_flexmock()
self._api_server_ip = socket.gethostbyname(socket.gethostname())
self._api_server_port = get_free_port()
http_server_port = get_free_port()
self._api_admin_port = get_free_port()
self._api_svr_greenlet = gevent.spawn(launch_api_server,
self._api_server_ip, self._api_server_port,
http_server_port, self._api_admin_port,
self._config_knobs)
block_till_port_listened(self._api_server_ip, self._api_server_port)
extra_env = {'HTTP_HOST':'%s%s' %(self._api_server_ip,
self._api_server_port)}
self._api_svr_app = TestApp(bottle.app(), extra_environ=extra_env)
self._vnc_lib = VncApi('u', 'p', api_server_host=self._api_server_ip,
api_server_port=self._api_server_port)
FakeNovaClient.vnc_lib = self._vnc_lib
self._api_server_session = requests.Session()
adapter = requests.adapters.HTTPAdapter()
self._api_server_session.mount("http://", adapter)
self._api_server_session.mount("https://", adapter)
self._api_server = vnc_cfg_api_server.server
self._api_server._sandesh.set_logging_params(level="SYS_WARN")
self.addCleanup(self.cleanUp)
# end setUp
def cleanUp(self):
self._api_svr_greenlet.kill()
self._api_server._db_conn._msgbus.shutdown()
FakeKombu.reset()
FakeIfmapClient.reset()
CassandraCFs.reset()
FakeExtensionManager.reset()
#cov_handle.stop()
#cov_handle.report(file=open('covreport.txt', 'w'))
# end cleanUp
def get_obj_imid(self, obj):
return 'contrail:%s:%s' %(obj._type, obj.get_fq_name_str())
# end get_obj_imid
def create_virtual_network(self, vn_name, vn_subnet):
vn_obj = VirtualNetwork(name=vn_name)
ipam_fq_name = [
'default-domain', 'default-project', 'default-network-ipam']
ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
cidr = vn_subnet.split('/')
pfx = cidr[0]
pfx_len = int(cidr[1])
subnet_info = IpamSubnetType(subnet=SubnetType(pfx, pfx_len))
subnet_data = VnSubnetsType([subnet_info])
vn_obj.add_network_ipam(ipam_obj, subnet_data)
self._vnc_lib.virtual_network_create(vn_obj)
vn_obj.clear_pending_updates()
return vn_obj
# end create_virtual_network
def create_network_policy(self, vn1, vn2, service_list=None, service_mode=None, service_type=None, action_type='simple-action'):
addr1 = AddressType(virtual_network=vn1.get_fq_name_str())
addr2 = AddressType(virtual_network=vn2.get_fq_name_str())
port = PortType(-1, 0)
service_name_list = []
if service_list:
for service in service_list:
sti = [ServiceTemplateInterfaceType(
'left'), ServiceTemplateInterfaceType('right')]
st_prop = ServiceTemplateType(
service_type=service_type,
flavor='medium',
image_name='junk',
ordered_interfaces=True,
service_mode=service_mode, interface_type=sti)
service_template = ServiceTemplate(
name=service + 'template',
service_template_properties=st_prop)
self._vnc_lib.service_template_create(service_template)
scale_out = ServiceScaleOutType()
if service_mode == 'in-network':
if_list = [ServiceInstanceInterfaceType(virtual_network=vn1.get_fq_name_str()),
ServiceInstanceInterfaceType(virtual_network=vn2.get_fq_name_str())]
si_props = ServiceInstanceType(
auto_policy=True, interface_list=if_list,
scale_out=scale_out)
else:
if_list = [ServiceInstanceInterfaceType(),
ServiceInstanceInterfaceType()]
si_props = ServiceInstanceType(interface_list=if_list,
scale_out=scale_out)
service_instance = ServiceInstance(
name=service, service_instance_properties=si_props)
service_instance.add_service_template(service_template)
self._vnc_lib.service_instance_create(service_instance)
service_name_list.append(service_instance.get_fq_name_str())
if action_type == 'mirror-to':
mirror = MirrorActionType(analyzer_name=service_instance.get_fq_name_str())
action_list = ActionListType(mirror_to=mirror)
elif service_name_list:
action_list = ActionListType(apply_service=service_name_list)
else:
action_list = ActionListType(simple_action='pass')
prule = PolicyRuleType(direction="<>", protocol="any",
src_addresses=[addr1], dst_addresses=[addr2],
src_ports=[port], dst_ports=[port],
action_list=action_list)
pentry = PolicyEntriesType([prule])
np = NetworkPolicy(str(uuid.uuid4()), network_policy_entries=pentry)
if service_mode == 'in-network':
return np
self._vnc_lib.network_policy_create(np)
return np
# end create_network_policy
# end TestCase
|
{
"content_hash": "79101fc334debccff9733f371bb555db",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 132,
"avg_line_length": 40.45059288537549,
"alnum_prop": 0.6078268516709009,
"repo_name": "DreamLab/contrail-controller",
"id": "94fafb097277a28017586d2ff9dc9ffc684bd9d6",
"size": "20537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config/common/tests/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49170"
},
{
"name": "C++",
"bytes": "16557494"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Groff",
"bytes": "34784"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "5736"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Protocol Buffer",
"bytes": "4744"
},
{
"name": "Python",
"bytes": "3862744"
},
{
"name": "Shell",
"bytes": "64588"
},
{
"name": "Thrift",
"bytes": "40763"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
}
|
from dulwich.repo import Repo
from dulwich.client import get_transport_and_path
import sys
def push(remote_url, repo_path='.'):
"""
Push to a remote repository
:param remote_url: <str> url of remote repository
:param repo_path: <str> path of local repository
:return refs: <dict> dictionary of ref-sha pairs
"""
client, path = get_transport_and_path(remote_url)
r = Repo(repo_path)
objsto = r.object_store
refs = r.get_refs()
def update_refs(old):
# TODO: Too complicated, not necessary to find the refs that
# differ - it's fine to update a ref even if it already exists.
# TODO: Also error out if there are non-fast forward updates
same = list(set(refs).intersection(old))
new = dict([(k,refs[k]) for k in same if refs[k] != old[k]])
dfky = list(set(refs) - set(new))
dfrnt = dict([(k,refs[k]) for k in dfky if k != 'HEAD'])
return dict(new.items() + dfrnt.items())
return client.send_pack(path,
update_refs,
objsto.generate_pack_contents,
sys.stdout.write)
|
{
"content_hash": "bc94472e6fab2b457c4c7a5d2eda2326",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 39.96551724137931,
"alnum_prop": 0.5962036238136325,
"repo_name": "mikofski/dulwichPorcelain",
"id": "e23831c5c1e4e4aaba38057ace81941b540c3a57",
"size": "1159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "push.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "7186"
}
],
"symlink_target": ""
}
|
"""Test the bootstrapping."""
# pylint: disable=protected-access
import asyncio
import glob
import os
from unittest.mock import Mock, patch
import pytest
from homeassistant import bootstrap, core, runner
import homeassistant.config as config_util
from homeassistant.const import SIGNAL_BOOTSTRAP_INTEGRATIONS
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from tests.common import (
MockModule,
MockPlatform,
get_test_config_dir,
mock_coro,
mock_entity_platform,
mock_integration,
)
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
@pytest.fixture(autouse=True)
def apply_mock_storage(hass_storage):
"""Apply the storage mock."""
@pytest.fixture(autouse=True)
async def apply_stop_hass(stop_hass):
"""Make sure all hass are stopped."""
@pytest.fixture(autouse=True)
def mock_http_start_stop():
"""Mock HTTP start and stop."""
with patch(
"homeassistant.components.http.start_http_server_and_save_config"
), patch("homeassistant.components.http.HomeAssistantHTTP.stop"):
yield
@patch("homeassistant.bootstrap.async_enable_logging", Mock())
async def test_home_assistant_core_config_validation(hass):
"""Test if we pass in wrong information for HA conf."""
# Extensive HA conf validation testing is done
result = await bootstrap.async_from_config_dict(
{"homeassistant": {"latitude": "some string"}}, hass
)
assert result is None
async def test_async_enable_logging(hass, caplog):
"""Test to ensure logging is migrated to the queue handlers."""
with patch("logging.getLogger"), patch(
"homeassistant.bootstrap.async_activate_log_queue_handler"
) as mock_async_activate_log_queue_handler, patch(
"homeassistant.bootstrap.logging.handlers.RotatingFileHandler.doRollover",
side_effect=OSError,
):
bootstrap.async_enable_logging(hass)
mock_async_activate_log_queue_handler.assert_called_once()
mock_async_activate_log_queue_handler.reset_mock()
bootstrap.async_enable_logging(
hass,
log_rotate_days=5,
log_file="test.log",
)
mock_async_activate_log_queue_handler.assert_called_once()
for f in glob.glob("test.log*"):
os.remove(f)
for f in glob.glob("testing_config/home-assistant.log*"):
os.remove(f)
assert "Error rolling over log file" in caplog.text
async def test_load_hassio(hass):
"""Test that we load Hass.io component."""
with patch.dict(os.environ, {}, clear=True):
assert bootstrap._get_domains(hass, {}) == set()
with patch.dict(os.environ, {"SUPERVISOR": "1"}):
assert bootstrap._get_domains(hass, {}) == {"hassio"}
@pytest.mark.parametrize("load_registries", [False])
async def test_empty_setup(hass):
"""Test an empty set up loads the core."""
await bootstrap.async_from_config_dict({}, hass)
for domain in bootstrap.CORE_INTEGRATIONS:
assert domain in hass.config.components, domain
async def test_core_failure_loads_safe_mode(hass, caplog):
"""Test failing core setup aborts further setup."""
with patch(
"homeassistant.components.homeassistant.async_setup",
return_value=mock_coro(False),
):
await bootstrap.async_from_config_dict({"group": {}}, hass)
assert "core failed to initialize" in caplog.text
# We aborted early, group not set up
assert "group" not in hass.config.components
@pytest.mark.parametrize("load_registries", [False])
async def test_setting_up_config(hass):
"""Test we set up domains in config."""
await bootstrap._async_set_up_integrations(
hass, {"group hello": {}, "homeassistant": {}}
)
assert "group" in hass.config.components
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_all_present(hass):
"""Test after_dependencies when all present."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
with patch(
"homeassistant.components.logger.async_setup", gen_domain_setup("logger")
):
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}, "logger": {}}
)
assert "root" in hass.config.components
assert "first_dep" in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["logger", "root", "first_dep", "second_dep"]
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_in_stage_1_ignored(hass):
"""Test after_dependencies are ignored in stage 1."""
# This test relies on this
assert "cloud" in bootstrap.STAGE_1_INTEGRATIONS
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="normal_integration",
async_setup=gen_domain_setup("normal_integration"),
partial_manifest={"after_dependencies": ["an_after_dep"]},
),
)
mock_integration(
hass,
MockModule(
domain="an_after_dep",
async_setup=gen_domain_setup("an_after_dep"),
),
)
mock_integration(
hass,
MockModule(
domain="cloud",
async_setup=gen_domain_setup("cloud"),
partial_manifest={"after_dependencies": ["normal_integration"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"cloud": {}, "normal_integration": {}, "an_after_dep": {}}
)
assert "normal_integration" in hass.config.components
assert "cloud" in hass.config.components
assert order == ["cloud", "an_after_dep", "normal_integration"]
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_frontend_before_recorder(hass):
"""Test frontend is setup before recorder."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="normal_integration",
async_setup=gen_domain_setup("normal_integration"),
partial_manifest={"after_dependencies": ["an_after_dep"]},
),
)
mock_integration(
hass,
MockModule(
domain="an_after_dep",
async_setup=gen_domain_setup("an_after_dep"),
),
)
mock_integration(
hass,
MockModule(
domain="frontend",
async_setup=gen_domain_setup("frontend"),
partial_manifest={
"dependencies": ["http"],
"after_dependencies": ["an_after_dep"],
},
),
)
mock_integration(
hass,
MockModule(
domain="http",
async_setup=gen_domain_setup("http"),
),
)
mock_integration(
hass,
MockModule(
domain="recorder",
async_setup=gen_domain_setup("recorder"),
),
)
await bootstrap._async_set_up_integrations(
hass,
{
"frontend": {},
"http": {},
"recorder": {},
"normal_integration": {},
"an_after_dep": {},
},
)
assert "frontend" in hass.config.components
assert "normal_integration" in hass.config.components
assert "recorder" in hass.config.components
assert order == [
"http",
"frontend",
"recorder",
"an_after_dep",
"normal_integration",
]
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_via_platform(hass):
"""Test after_dependencies set up via platform."""
order = []
after_dep_event = asyncio.Event()
def gen_domain_setup(domain):
async def async_setup(hass, config):
if domain == "after_dep_of_platform_int":
await after_dep_event.wait()
order.append(domain)
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="after_dep_of_platform_int",
async_setup=gen_domain_setup("after_dep_of_platform_int"),
),
)
mock_integration(
hass,
MockModule(
domain="platform_int",
async_setup=gen_domain_setup("platform_int"),
partial_manifest={"after_dependencies": ["after_dep_of_platform_int"]},
),
)
mock_entity_platform(hass, "light.platform_int", MockPlatform())
@core.callback
def continue_loading(_):
"""When light component loaded, continue other loading."""
after_dep_event.set()
hass.bus.async_listen_once("component_loaded", continue_loading)
await bootstrap._async_set_up_integrations(
hass, {"light": {"platform": "platform_int"}, "after_dep_of_platform_int": {}}
)
assert "light" in hass.config.components
assert "after_dep_of_platform_int" in hass.config.components
assert "platform_int" in hass.config.components
assert order == ["after_dep_of_platform_int", "platform_int"]
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_not_trigger_load(hass):
"""Test after_dependencies does not trigger loading it."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(hass, {"root": {}, "second_dep": {}})
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
@pytest.mark.parametrize("load_registries", [False])
async def test_setup_after_deps_not_present(hass):
"""Test after_dependencies when referenced integration doesn't exist."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}}
)
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "second_dep"]
@pytest.fixture
def mock_is_virtual_env():
"""Mock enable logging."""
with patch(
"homeassistant.bootstrap.is_virtual_env", return_value=False
) as is_virtual_env:
yield is_virtual_env
@pytest.fixture
def mock_enable_logging():
"""Mock enable logging."""
with patch("homeassistant.bootstrap.async_enable_logging") as enable_logging:
yield enable_logging
@pytest.fixture
def mock_mount_local_lib_path():
"""Mock enable logging."""
with patch(
"homeassistant.bootstrap.async_mount_local_lib_path"
) as mount_local_lib_path:
yield mount_local_lib_path
@pytest.fixture
def mock_process_ha_config_upgrade():
"""Mock enable logging."""
with patch(
"homeassistant.config.process_ha_config_upgrade"
) as process_ha_config_upgrade:
yield process_ha_config_upgrade
@pytest.fixture
def mock_ensure_config_exists():
"""Mock enable logging."""
with patch(
"homeassistant.config.async_ensure_config_exists", return_value=True
) as ensure_config_exists:
yield ensure_config_exists
async def test_setup_hass(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
caplog,
loop,
):
"""Test it works."""
verbose = Mock()
log_rotate_days = Mock()
log_file = Mock()
log_no_color = Mock()
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={"browser": {}, "frontend": {}},
), patch.object(bootstrap, "LOG_SLOW_STARTUP_INTERVAL", 5000):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=verbose,
log_rotate_days=log_rotate_days,
log_file=log_file,
log_no_color=log_no_color,
skip_pip=True,
safe_mode=False,
),
)
assert "Waiting on integrations to complete setup" not in caplog.text
assert "browser" in hass.config.components
assert "safe_mode" not in hass.config.components
assert len(mock_enable_logging.mock_calls) == 1
assert mock_enable_logging.mock_calls[0][1] == (
hass,
verbose,
log_rotate_days,
log_file,
log_no_color,
)
assert len(mock_mount_local_lib_path.mock_calls) == 1
assert len(mock_ensure_config_exists.mock_calls) == 1
assert len(mock_process_ha_config_upgrade.mock_calls) == 1
assert hass == core.async_get_hass()
async def test_setup_hass_takes_longer_than_log_slow_startup(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
caplog,
loop,
):
"""Test it works."""
verbose = Mock()
log_rotate_days = Mock()
log_file = Mock()
log_no_color = Mock()
async def _async_setup_that_blocks_startup(*args, **kwargs):
await asyncio.sleep(0.6)
return True
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={"browser": {}, "frontend": {}},
), patch.object(bootstrap, "LOG_SLOW_STARTUP_INTERVAL", 0.3), patch.object(
bootstrap, "SLOW_STARTUP_CHECK_INTERVAL", 0.05
), patch(
"homeassistant.components.frontend.async_setup",
side_effect=_async_setup_that_blocks_startup,
):
await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=verbose,
log_rotate_days=log_rotate_days,
log_file=log_file,
log_no_color=log_no_color,
skip_pip=True,
safe_mode=False,
),
)
assert "Waiting on integrations to complete setup" in caplog.text
async def test_setup_hass_invalid_yaml(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
with patch(
"homeassistant.config.async_hass_config_yaml", side_effect=HomeAssistantError
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=False,
),
)
assert "safe_mode" in hass.config.components
assert len(mock_mount_local_lib_path.mock_calls) == 0
async def test_setup_hass_config_dir_nonexistent(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
mock_ensure_config_exists.return_value = False
assert (
await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=False,
),
)
is None
)
async def test_setup_hass_safe_mode(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
with patch("homeassistant.components.browser.setup") as browser_setup, patch(
"homeassistant.config_entries.ConfigEntries.async_domains",
return_value=["browser"],
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=True,
),
)
assert "safe_mode" in hass.config.components
assert len(mock_mount_local_lib_path.mock_calls) == 0
# Validate we didn't try to set up config entry.
assert "browser" not in hass.config.components
assert len(browser_setup.mock_calls) == 0
async def test_setup_hass_invalid_core_config(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={"homeassistant": {"non-existing": 1}},
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=False,
),
)
assert "safe_mode" in hass.config.components
async def test_setup_safe_mode_if_no_frontend(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test we setup safe mode if frontend didn't load."""
verbose = Mock()
log_rotate_days = Mock()
log_file = Mock()
log_no_color = Mock()
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={
"homeassistant": {
"internal_url": "http://192.168.1.100:8123",
"external_url": "https://abcdef.ui.nabu.casa",
},
"map": {},
"person": {"invalid": True},
},
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=verbose,
log_rotate_days=log_rotate_days,
log_file=log_file,
log_no_color=log_no_color,
skip_pip=True,
safe_mode=False,
),
)
assert "safe_mode" in hass.config.components
assert hass.config.config_dir == get_test_config_dir()
assert hass.config.skip_pip
assert hass.config.internal_url == "http://192.168.1.100:8123"
assert hass.config.external_url == "https://abcdef.ui.nabu.casa"
@pytest.mark.parametrize("load_registries", [False])
async def test_empty_integrations_list_is_only_sent_at_the_end_of_bootstrap(hass):
"""Test empty integrations list is only sent at the end of bootstrap."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
await asyncio.sleep(0.1)
async def _background_task():
await asyncio.sleep(0.2)
await hass.async_create_task(_background_task())
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="normal_integration",
async_setup=gen_domain_setup("normal_integration"),
partial_manifest={"after_dependencies": ["an_after_dep"]},
),
)
mock_integration(
hass,
MockModule(
domain="an_after_dep",
async_setup=gen_domain_setup("an_after_dep"),
),
)
integrations = []
@core.callback
def _bootstrap_integrations(data):
integrations.append(data)
async_dispatcher_connect(
hass, SIGNAL_BOOTSTRAP_INTEGRATIONS, _bootstrap_integrations
)
with patch.object(bootstrap, "SLOW_STARTUP_CHECK_INTERVAL", 0.05):
await bootstrap._async_set_up_integrations(
hass, {"normal_integration": {}, "an_after_dep": {}}
)
await hass.async_block_till_done()
assert integrations[0] != {}
assert "an_after_dep" in integrations[0]
assert integrations[-3] != {}
assert integrations[-1] == {}
assert "normal_integration" in hass.config.components
assert order == ["an_after_dep", "normal_integration"]
@pytest.mark.parametrize("load_registries", [False])
async def test_warning_logged_on_wrap_up_timeout(hass, caplog):
"""Test we log a warning on bootstrap timeout."""
def gen_domain_setup(domain):
async def async_setup(hass, config):
await asyncio.sleep(0.1)
async def _background_task():
await asyncio.sleep(0.2)
await hass.async_create_task(_background_task())
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="normal_integration",
async_setup=gen_domain_setup("normal_integration"),
partial_manifest={},
),
)
with patch.object(bootstrap, "WRAP_UP_TIMEOUT", 0):
await bootstrap._async_set_up_integrations(hass, {"normal_integration": {}})
await hass.async_block_till_done()
assert "Setup timed out for bootstrap - moving forward" in caplog.text
|
{
"content_hash": "2d01da772be751725f7c1f56a76ebbfa",
"timestamp": "",
"source": "github",
"line_count": 797,
"max_line_length": 86,
"avg_line_length": 29.485570890840652,
"alnum_prop": 0.5997446808510638,
"repo_name": "w1ll1am23/home-assistant",
"id": "e51f4d315eefcf7ae4decefb960f1c04c70e92fe",
"size": "23500",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/test_bootstrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_restless import APIManager
import datetime
import os
from app import config
app = Flask(__name__)
# init config
app.config.from_object(config)
# init views
from app import views
#init database
from app.database import db, restless
from app.model import user, system
db.create_all()
from app import db_init
db_init.init()
if __name__ == "__main__":
app.run(debug=True)
|
{
"content_hash": "ccddf0a09f466b771be725fb360f9f30",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 39,
"avg_line_length": 17.074074074074073,
"alnum_prop": 0.7505422993492408,
"repo_name": "chengjf/database-interface-doc-management",
"id": "a7d77dd5edc1d611d86e95e784b9f1e9e3f0050e",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask-demo/app/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1000"
},
{
"name": "C",
"bytes": "417752"
},
{
"name": "C++",
"bytes": "106543"
},
{
"name": "CSS",
"bytes": "23303"
},
{
"name": "HTML",
"bytes": "30247"
},
{
"name": "Java",
"bytes": "30914"
},
{
"name": "JavaScript",
"bytes": "7783355"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "10014902"
}
],
"symlink_target": ""
}
|
"""Common functionality and utility functions for unit tests
"""
import os
from ozelot import client
from ozelot import config
def get_test_db_client():
"""Get client for test database
Returns
:mod:`ozelot.client.Client`: database client
"""
return client.Client(connection_string='sqlite:///' + config.TESTING_SQLITE_DB_PATH)
def remove_test_db_file():
"""Remove the test database file (if it exists)
"""
if os.path.exists(config.TESTING_SQLITE_DB_PATH):
os.remove(config.TESTING_SQLITE_DB_PATH)
def get_fixtures_path():
"""Get path to test fixtures directory
Returns
str: path to test fixtures directory
"""
return os.path.join(os.path.dirname(__file__), 'fixtures')
|
{
"content_hash": "b55fb93cefe263b956ff5e7d24c53e21",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 88,
"avg_line_length": 23.34375,
"alnum_prop": 0.6733601070950469,
"repo_name": "trycs/ozelot",
"id": "bc77d9df539c5d1e5337f038d058fdabacea032d",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ozelot/tests/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "122939"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from past.builtins import basestring
import os.path
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from httpwatcher.errors import MissingFolderError
from tornado import gen
from tornado.ioloop import PeriodicCallback
from tornado.queues import Queue
import logging
logger = logging.getLogger(__name__)
__all__ = [
"FileSystemWatcher"
]
class FileSystemWatcher(object):
def __init__(self, watch_paths, on_changed=None, interval=1.0, recursive=True):
"""Constructor.
Args:
watch_paths: A list of filesystem paths to watch for changes.
on_changed: Callback to call when one or more changes to the watch path are detected.
interval: The minimum interval at which to notify about changes (in seconds).
recursive: Should the watch path be monitored recursively for changes?
"""
if isinstance(watch_paths, basestring):
watch_paths = [watch_paths]
watch_paths = [os.path.abspath(path) for path in watch_paths]
for path in watch_paths:
if not os.path.exists(path) or not os.path.isdir(path):
raise MissingFolderError(path)
self.watch_paths = watch_paths
self.interval = interval * 1000.0
self.recursive = recursive
self.periodic_callback = PeriodicCallback(self.check_fs_events, self.interval)
self.on_changed = on_changed
self.observer = Observer()
for path in self.watch_paths:
self.observer.schedule(
WatcherEventHandler(self),
path,
self.recursive
)
self.started = False
self.fs_event_queue = Queue()
def track_event(self, event):
self.fs_event_queue.put(event)
@gen.coroutine
def check_fs_events(self):
drained_events = []
while self.fs_event_queue.qsize() > 0:
drained_events.append(self.fs_event_queue.get_nowait())
if len(drained_events) > 0 and callable(self.on_changed):
logger.debug("Detected %d file system change(s) - triggering callback" % len(drained_events))
self.on_changed(drained_events)
def start(self):
if not self.started:
self.observer.start()
self.periodic_callback.start()
self.started = True
logger.debug("Started file system watcher for paths:\n%s" % "\n".join(self.watch_paths))
def shutdown(self, timeout=None):
if self.started:
self.periodic_callback.stop()
self.observer.stop()
self.observer.join(timeout=timeout)
self.started = False
logger.debug("Shut down file system watcher for path:\n%s" % "\n".join(self.watch_paths))
class WatcherEventHandler(FileSystemEventHandler):
def __init__(self, watcher):
super(WatcherEventHandler, self).__init__()
self.watcher = watcher
def on_any_event(self, event):
logger.debug("WatcherEventHandler detected filesystem event: %s" % event)
self.watcher.track_event(event)
|
{
"content_hash": "a1c6924426da624bd71da8e612b1517e",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 105,
"avg_line_length": 34.13978494623656,
"alnum_prop": 0.6390551181102362,
"repo_name": "thanethomson/httpwatcher",
"id": "a4d4a727b943fdf3ce4d08c7e9838a44dddfef25",
"size": "3200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "httpwatcher/filesystem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "22492"
},
{
"name": "Python",
"bytes": "35995"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
}
|
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('cp949')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='cp949',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
{
"content_hash": "daf27e99cd643f64e7505359b5ead761",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 23.871794871794872,
"alnum_prop": 0.6938775510204082,
"repo_name": "ppyordanov/HCI_4_Future_Cities",
"id": "364fc7eaceea625e6aa530fdc6c3bff62c36f90e",
"size": "1029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/src/virtualenv/Lib/encodings/cp949.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "427445"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "280650"
},
{
"name": "D",
"bytes": "9679"
},
{
"name": "HTML",
"bytes": "37335"
},
{
"name": "Java",
"bytes": "740594"
},
{
"name": "JavaScript",
"bytes": "1801741"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "2631176"
},
{
"name": "Shell",
"bytes": "12283"
}
],
"symlink_target": ""
}
|
"""
Django settings for learnwithpeople project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
path = lambda *a: os.path.join(BASE_DIR, *a)
env = lambda key, default: os.environ.get(key, default)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
ADMINS = (
('Admin', os.environ.get('ADMIN_EMAIL', 'admin@localhost') ),
)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', 'youshouldchangethis')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', False) == 'true'
CRISPY_FAIL_SILENTLY = not DEBUG
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
# 3rd party apps
'corsheaders',
'crispy_forms',
'crispy_bootstrap5',
'phonenumber_field',
'rest_framework',
'django_filters',
'webpack_loader',
'tinymce',
'django_bleach',
# own
'studygroups',
'backup',
'analytics',
'uxhelpers',
'places',
'custom_registration',
'advice',
'surveys',
'announce',
'community_calendar',
'client_logging',
'contact',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learnwithpeople.urls'
WSGI_APPLICATION = 'learnwithpeople.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [
path('locale')
]
FORMAT_MODULE_PATH = [
'learnwithpeople.formats',
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = path('static_serve')
STATICFILES_DIRS = [
path('static'),
path('assets'),
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other finders..
]
MEDIA_URL = '/media/'
MEDIA_ROOT = path('upload')
# If this isn't set, you'll get inconsisten permissions when uploading big files on linux
FILE_UPLOAD_PERMISSIONS = 0o644
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [path('templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'studygroups.context_processors.domain',
'studygroups.context_processors.globals',
]
}
},
]
####### Django Webpack Loader config #######
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'dist/',
'STATS_FILE': path('assets/frontend-webpack-manifest.json'),
},
'STYLEBUILD': {
'BUNDLE_DIR_NAME': 'dist/',
'STATS_FILE': path('assets/style-webpack-manifest.json'),
},
}
CRISPY_ALLOWED_TEMPLATE_PACKS = "bootstrap5"
CRISPY_TEMPLATE_PACK = "bootstrap5"
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_PORT = os.environ.get('EMAIL_PORT', 25)
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
if DEBUG is True and EMAIL_HOST is None:
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = path('mailbox')
# Default email sender
DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', 'webmaster@localhost')
# email for sending the community digest to
COMMUNITY_DIGEST_EMAIL = env('COMMUNITY_DIGEST_EMAIL', 'digest@localhost')
# Used for error messages to admin/staff
SERVER_EMAIL = env('SERVER_EMAIL', 'no-reply@p2pu.org')
# Team email
TEAM_EMAIL = env('TEAM_EMAIL', 'thepeople@p2pu.org')
SUPPORT_EMAIL = env('SUPPORT_EMAIL', 'support@localhost')
MEMBER_SUPPORT_URL = env('MEMBER_SUPPORT_URL', '') # TODO rename to TEAM_SUPPORT_URL
TEAM_MANAGER_EMAIL = env('TEAM_MANAGER_EMAIL', 'teams@localhost')
COMMUNITY_MANAGER = os.environ.get('COMMUNITY_MANAGER_EMAIL', 'community@localhost')
##### Database config
import dj_database_url
DATABASES['default'] = dj_database_url.config(default='sqlite:///{0}'.format(path('db.sqlite3')))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Exempt trusted domains
CSRF_TRUSTED_ORIGINS = [
'.p2pu.org',
]
if DEBUG:
CSRF_TRUSTED_ORIGINS += [
'localhost:8000',
'localhost:3001',
'localhost:4000',
]
# CORS config
CORS_ORIGIN_WHITELIST = [
"https://www.p2pu.org",
"https://p2pu.github.io",
]
if DEBUG:
CORS_ORIGIN_WHITELIST.append('http://localhost:8000')
CORS_ORIGIN_WHITELIST.append('http://localhost:3001')
CORS_ORIGIN_WHITELIST.append('http://localhost:4000')
CORS_ALLOWED_ORIGIN_REGEXES = [
r"^https://.*\.p2pu\.org$",
]
CORS_ALLOW_CREDENTIALS = True
AUTHENTICATION_BACKENDS = ['custom_registration.backend.CaseInsensitiveBackend']
# URL for P2PU static site (only really useful for dev and staging environments
STATIC_SITE_URL = env('STATIC_SITE_URL', 'http://localhost:4000')
##### Twilio config
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')
TWILIO_NUMBER = os.environ.get('TWILIO_NUMBER')
LOGIN_REDIRECT_URL = '/login_redirect/'
LOGOUT_REDIRECT_URL = STATIC_SITE_URL
DOMAIN = env('DOMAIN', 'localhost:8000')
PROTOCOL = env('PROTOCOL', 'https')
####### Google analytics tracking info #######
GA_TRACKING_ID = env('GA_TRACKING_ID', 'UA-0000000-00')
####### Celery config #######
CELERY_BROKER_URL = env('BROKER_URL', 'amqp://guest:guest@localhost//')
from celery.schedules import crontab
CELERY_BEAT_SCHEDULE = {
'send_reminders': {
'task': 'studygroups.tasks.send_reminders',
'schedule': crontab(minute='*/5'),
},
'send_meeting_wrapup': {
'task': 'studygroups.tasks.send_meeting_wrapups',
'schedule': crontab(minute='*/5'),
},
'send_new_user_email': {
'task': 'custom_registration.tasks.send_new_user_emails',
'schedule': crontab(minute='*/10'),
},
'send_learner_surveys': {
'task': 'studygroups.tasks.send_all_learner_surveys',
'schedule': crontab(minute='30'),
},
'send_facilitator_survey': {
'task': 'studygroups.tasks.send_all_facilitator_surveys',
'schedule': crontab(minute='30'),
},
'weekly_update': {
'task': 'studygroups.tasks.weekly_update',
'schedule': crontab(hour=10, minute=0, day_of_week='monday'),
},
'daily_backup': {
'task': 'backup.tasks.make_backup',
'schedule': crontab(hour=1, minute=0),
},
'sync_typeform_surveys': {
'task': 'surveys.tasks.sync_surveys',
'schedule': crontab(minute='10'),
},
'send_community_digest': {
'task': 'studygroups.tasks.send_out_community_digest',
'schedule': crontab(day_of_week='monday', hour=11, minute=0),
},
'refresh_instagram_token': {
'task': 'studygroups.tasks.refresh_instagram_token',
'schedule': crontab(day_of_month=[1], hour=1, minute=0)
},
'anonymize_signups': {
'task': 'studygroups.tasks.anonymize_signups',
'schedule': crontab(hour=23, minute=0),
}
}
LOGGING = {
'version': 1,
'dissable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG' if DEBUG else 'WARNING',
},
},
'root': {
'handlers': ['mail_admins', 'console'],
'level': 'DEBUG',
},
'loggers': {
'django': {
'handlers': ['mail_admins', 'console'],
'level': 'WARNING',
'propagate': False,
},
},
}
# Disable django's default logging
LOGGING_CONFIG = None
import logging.config
logging.config.dictConfig(LOGGING)
# Explicitly set DEFAULT_AUTO_FIELD. Option added Django 3.2
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
#### Backup config ####
BACKUP_DIR = os.environ.get('BACKUP_DIR', '/tmp') # Directory where backups will be stored locally
BACKUP_AWS_ACCESS_KEY_ID = os.environ.get('BACKUP_AWS_ACCESS_KEY_ID') # AWS key with access to backup bucket
BACKUP_AWS_SECRET_ACCESS_KEY = os.environ.get('BACKUP_AWS_SECRET_ACCESS_KEY') # AWS secret for above key
BACKUP_AWS_STORAGE_BUCKET_NAME = os.environ.get('BACKUP_AWS_STORAGE_BUCKET_NAME') # Name of the bucket where backups should be stored
BACKUP_AWS_KEY_PREFIX = os.environ.get('BACKUP_AWS_KEY_PREFIX') # Prefix for generated key on AWS s3
#### Mailchimp API key ###
MAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', '')
MAILCHIMP_LIST_ID = env('MAILCHIMP_LIST_ID', '')
MAILCHIMP_API_ROOT = env('MAILCHIMP_API_ROOT', 'https://??.api.mailchimp.com/3.0/')
MAILCHIMP_WEBHOOK_SECRET = env('MAILCHIMP_WEBHOOK_SECRET', '')
DISCOURSE_BASE_URL = env('DISCOURSE_BASE_URL', 'https://community.p2pu.org')
DISCOURSE_SSO_SECRET = env('DISCOURSE_SSO_SECRET', '')
DISCOURSE_API_KEY = env('DISCOURSE_API_KEY', '')
DISCOURSE_API_USERNAME = env('DISCOURSE_API_USERNAME', '')
DISCOURSE_BOT_API_KEY = env('DISCOURSE_BOT_API_KEY', '')
DISCOURSE_BOT_API_USERNAME = env('DISCOURSE_BOT_API_USERNAME', '')
DISCOURSE_COURSES_AND_TOPICS_CATEGORY_ID = env('DISCOURSE_COURSES_AND_TOPICS_CATEGORY_ID', 69)
TYPEFORM_ACCESS_TOKEN = env('TYPEFORM_ACCESS_TOKEN', '')
TYPEFORM_FACILITATOR_SURVEY_FORM = env('TYPEFORM_FACILITATOR_SURVEY_FORM', 'NOTSET')
TYPEFORM_LEARNER_SURVEY_FORM = env('TYPEFORM_LEARNER_SURVEY_FORM', 'NOTSET')
# AWS credentials for email resources
P2PU_RESOURCES_AWS_ACCESS_KEY = env('RESOURCES_AWS_ACCESS_KEY', '')
P2PU_RESOURCES_AWS_SECRET_KEY = env('RESOURCES_AWS_SECRET_KEY', '')
P2PU_RESOURCES_AWS_BUCKET = env('RESOURCES_AWS_BUCKET', '')
# Config for sending announcements
MAILGUN_API_KEY = env('MAILGUN_API_KEY', '')
MAILGUN_DOMAIN = env('MAILGUN_DOMAIN', '')
ANNOUNCE_EMAIL = env('ANNOUNCE_EMAIL', 'announce@localhost')
# Instagram token
INSTAGRAM_TOKEN = env('INSTAGRAM_TOKEN', '')
# Mapbox access token
MAPBOX_TOKEN = env('MAPBOX_TOKEN', '')
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10,
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend']
}
# Rich text editor configurations
TINYMCE_DEFAULT_CONFIG = {
'menubar': False,
'plugins': 'link lists',
'toolbar': 'undo redo | formatselect | bold italic | bullist numlist | link | removeformat',
'valid_elements': 'p,h3,h4,h5,h6,strong,em,a,a[href|target=_blank|rel=noopener],ul,ol,li,div,span,br',
'block_formats': 'Paragraph=p; Heading 1=h3; Heading 2=h4; Heading 3=h5',
}
BLEACH_DEFAULT_WIDGET = 'tinymce.widgets.TinyMCE'
RECAPTCHA_SITE_KEY = env('RECAPTCHA_SITE_KEY', '')
RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', '')
|
{
"content_hash": "c28b314ee2a4b29694de928b5d4709db",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 133,
"avg_line_length": 30.58009708737864,
"alnum_prop": 0.6665608381617588,
"repo_name": "p2pu/learning-circles",
"id": "c3c9fdbf1a1d86dadeba11127cc624fbad0520a2",
"size": "12599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learnwithpeople/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "Dockerfile",
"bytes": "2110"
},
{
"name": "HTML",
"bytes": "222765"
},
{
"name": "JavaScript",
"bytes": "202138"
},
{
"name": "Python",
"bytes": "859945"
},
{
"name": "SCSS",
"bytes": "122949"
},
{
"name": "Shell",
"bytes": "808"
}
],
"symlink_target": ""
}
|
import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from poliastro.bodies import Sun, Mercury, Venus, Earth, Moon, Mars
from poliastro.bodies import Jupiter, Saturn, Uranus, Neptune, Pluto
from poliastro.patched_conics import compute_soi
def test_compute_soi():
# Data from Table A.2., Curtis "Orbital Mechanics for Engineering Students"
data = [
# body, SOI radius (m)
(Sun, None),
(Mercury, 1.12e8),
(Venus, 6.16e8),
(Earth, 9.25e8),
# (Moon, 6.61e7),
(Mars, 5.77e8),
(Jupiter, 4.82e10),
(Saturn, 5.48e10),
(Uranus, 5.18e10),
(Neptune, 8.66e10),
# (Pluto, 3.08e9)
]
for row in data:
body, expected_r_SOI = row
if expected_r_SOI is not None:
expected_r_SOI = expected_r_SOI * u.m
else:
continue
r_SOI = compute_soi(body)
assert_quantity_allclose(r_SOI, expected_r_SOI, rtol=1e-1)
def test_compute_missing_body_soi_raises_error():
with pytest.raises(RuntimeError) as excinfo:
r_SOI = compute_soi(Moon)
assert "To compute the semimajor axis for Moon and Pluto use the JPL ephemeris" in excinfo.exconly()
|
{
"content_hash": "8cd43505e9f00e76501e7961edb92b11",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 104,
"avg_line_length": 29.186046511627907,
"alnum_prop": 0.6199203187250996,
"repo_name": "anhiga/poliastro",
"id": "23438380aa8423570d6b300ec7bbd5d0f6a42910",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/poliastro/tests/test_patched_conics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3053"
},
{
"name": "Jupyter Notebook",
"bytes": "4110"
},
{
"name": "Python",
"bytes": "180988"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
}
|
"""mmg rates now set to 1.65 pence per sms
Revision ID: 0040_adjust_mmg_provider_rate
Revises: 0039_fix_notifications
Create Date: 2016-07-06 15:19:23.124212
"""
# revision identifiers, used by Alembic.
revision = "0040_adjust_mmg_provider_rate"
down_revision = "0039_fix_notifications"
import uuid
from datetime import datetime
import sqlalchemy as sa
from alembic import op
def upgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
conn.execute(
sa.sql.text(
(
"INSERT INTO provider_rates (id, valid_from, rate, provider_id) "
"VALUES (:id, :valid_from, :rate, (SELECT id FROM provider_details WHERE identifier = 'mmg'))"
)
),
id=uuid.uuid4(),
valid_from=datetime(2016, 7, 1),
rate=1.65,
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
conn.execute(
(
"DELETE FROM provider_rates "
"WHERE provider_id = (SELECT id FROM provider_details WHERE identifier = 'mmg') "
"AND rate = 1.65"
)
)
### end Alembic commands ###
|
{
"content_hash": "3a1f07daaf8494f4e22bdd123fb31e7b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 110,
"avg_line_length": 26.23404255319149,
"alnum_prop": 0.6009732360097324,
"repo_name": "alphagov/notifications-api",
"id": "195b6e2aa67f32eb622729aec34c462df04e1142",
"size": "1233",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "migrations/versions/0040_adjust_mmg_provider_rate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "719"
},
{
"name": "Jinja",
"bytes": "5543"
},
{
"name": "Makefile",
"bytes": "6627"
},
{
"name": "Mako",
"bytes": "361"
},
{
"name": "Procfile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "3506225"
},
{
"name": "Shell",
"bytes": "13179"
}
],
"symlink_target": ""
}
|
"""Custom yaml object types."""
from dataclasses import dataclass
import yaml
class NodeListClass(list):
"""Wrapper class to be able to add attributes on a list."""
class NodeStrClass(str):
"""Wrapper class to be able to add attributes on a string."""
@dataclass(frozen=True)
class Input:
"""Input that should be substituted."""
name: str
@classmethod
def from_node(cls, loader: yaml.Loader, node: yaml.nodes.Node) -> "Input":
"""Create a new placeholder from a node."""
return cls(node.value)
|
{
"content_hash": "36b05553213abcc23187a10824ba41a3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 78,
"avg_line_length": 22.625,
"alnum_prop": 0.6703499079189686,
"repo_name": "tboyce1/home-assistant",
"id": "0e46820e0db12091a47d717dc8b47f2008eb083d",
"size": "543",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/util/yaml/objects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1510140"
},
{
"name": "Python",
"bytes": "5144365"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
}
|
"""
Package-level global flags are defined here, the rest are defined
where they're used.
"""
from gflags import *
# This keeps pylint from barfing on the imports
FLAGS = FLAGS
DEFINE_string = DEFINE_string
DEFINE_integer = DEFINE_integer
DEFINE_bool = DEFINE_bool
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
DEFINE_string('region', 'nova', 'Region to use')
DEFINE_string('test_image', 'ami-tiny', 'Image to use for launch tests')
DEFINE_string('use_ipv6', True, 'use the ipv6 or not')
|
{
"content_hash": "a7a8fa0d4e27c7029a8cc309ae9e503f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.7326732673267327,
"repo_name": "anotherjesse/nova",
"id": "35f432a77800ffe982e0196dcf19141b57a780dc",
"size": "1383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smoketests/flags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "47238"
},
{
"name": "Python",
"bytes": "1445578"
},
{
"name": "Shell",
"bytes": "37610"
}
],
"symlink_target": ""
}
|
import json
import turtle
from figures.simple_figures import Circle, Square, Rectangle, Pie, RegularPolygon
def main():
try:
input_data = load_input_data("figures_simple.json")
figures = create_figures(input_data)
draw_figures(figures)
except Exception as e:
print("Invalid input file provided! Error:", e)
def load_input_data(input_filename):
with open(input_filename) as f:
input_data = json.load(f)
return input_data
FIGURE_TYPES = {
'square': Square,
'circle': Circle,
'rectangle': Rectangle,
'pie': Pie,
'reg_polygon': RegularPolygon
}
def create_figures(input_data: dict) -> list:
result = []
for f_info in input_data:
figure_type = f_info['type']
if figure_type in FIGURE_TYPES:
figure_class = FIGURE_TYPES[figure_type]
result.append(figure_class(**f_info))
else:
raise ValueError('Unsupported figure')
return result
def draw_figures(figures):
for figure in figures:
t = turtle.Turtle()
t.speed('fast')
figure.draw(t)
turtle.exitonclick()
if __name__ == "__main__":
main()
|
{
"content_hash": "6142cee758e9775d5ecde438c436431b",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 81,
"avg_line_length": 22.73076923076923,
"alnum_prop": 0.61082910321489,
"repo_name": "natla/softuni-python",
"id": "ae788265887752df42b3c3961c54db3dfc920af0",
"size": "1182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SoftUni-L8-OOP/Problem 1/figures/draw_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46180"
}
],
"symlink_target": ""
}
|
"""This module contains Google Dataproc Metastore operators."""
from datetime import datetime
from time import sleep
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry, exponential_sleep_generator
from google.cloud.metastore_v1 import MetadataExport, MetadataManagementActivity
from google.cloud.metastore_v1.types import Backup, MetadataImport, Service
from google.cloud.metastore_v1.types.metastore import DatabaseDumpSpec, Restore
from google.protobuf.field_mask_pb2 import FieldMask
from googleapiclient.errors import HttpError
from airflow import AirflowException
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.xcom import XCom
from airflow.providers.google.cloud.hooks.dataproc_metastore import DataprocMetastoreHook
from airflow.providers.google.common.links.storage import StorageLink
if TYPE_CHECKING:
from airflow.models.taskinstance import TaskInstanceKey
from airflow.utils.context import Context
BASE_LINK = "https://console.cloud.google.com"
METASTORE_BASE_LINK = BASE_LINK + "/dataproc/metastore/services/{region}/{service_id}"
METASTORE_BACKUP_LINK = METASTORE_BASE_LINK + "/backups/{resource}?project={project_id}"
METASTORE_BACKUPS_LINK = METASTORE_BASE_LINK + "/backuprestore?project={project_id}"
METASTORE_EXPORT_LINK = METASTORE_BASE_LINK + "/importexport?project={project_id}"
METASTORE_IMPORT_LINK = METASTORE_BASE_LINK + "/imports/{resource}?project={project_id}"
METASTORE_SERVICE_LINK = METASTORE_BASE_LINK + "/config?project={project_id}"
class DataprocMetastoreLink(BaseOperatorLink):
"""Helper class for constructing Dataproc Metastore resource link"""
name = "Dataproc Metastore"
key = "conf"
@staticmethod
def persist(
context: "Context",
task_instance: Union[
"DataprocMetastoreCreateServiceOperator",
"DataprocMetastoreGetServiceOperator",
"DataprocMetastoreRestoreServiceOperator",
"DataprocMetastoreUpdateServiceOperator",
"DataprocMetastoreListBackupsOperator",
"DataprocMetastoreExportMetadataOperator",
],
url: str,
):
task_instance.xcom_push(
context=context,
key=DataprocMetastoreLink.key,
value={
"region": task_instance.region,
"service_id": task_instance.service_id,
"project_id": task_instance.project_id,
"url": url,
},
)
def get_link(
self,
operator,
dttm: Optional[datetime] = None,
ti_key: Optional["TaskInstanceKey"] = None,
) -> str:
if ti_key is not None:
conf = XCom.get_value(key=self.key, ti_key=ti_key)
else:
assert dttm
conf = XCom.get_one(
dag_id=operator.dag.dag_id,
task_id=operator.task_id,
execution_date=dttm,
key=self.key,
)
return (
conf["url"].format(
region=conf["region"],
service_id=conf["service_id"],
project_id=conf["project_id"],
)
if conf
else ""
)
class DataprocMetastoreDetailedLink(BaseOperatorLink):
"""Helper class for constructing Dataproc Metastore detailed resource link"""
name = "Dataproc Metastore resource"
key = "config"
@staticmethod
def persist(
context: "Context",
task_instance: Union[
"DataprocMetastoreCreateBackupOperator",
"DataprocMetastoreCreateMetadataImportOperator",
],
url: str,
resource: str,
):
task_instance.xcom_push(
context=context,
key=DataprocMetastoreDetailedLink.key,
value={
"region": task_instance.region,
"service_id": task_instance.service_id,
"project_id": task_instance.project_id,
"url": url,
"resource": resource,
},
)
def get_link(
self,
operator,
dttm: Optional[datetime] = None,
ti_key: Optional["TaskInstanceKey"] = None,
) -> str:
if ti_key is not None:
conf = XCom.get_value(key=self.key, ti_key=ti_key)
else:
assert dttm
conf = XCom.get_one(
dag_id=operator.dag.dag_id,
task_id=operator.task_id,
execution_date=dttm,
key=DataprocMetastoreDetailedLink.key,
)
return (
conf["url"].format(
region=conf["region"],
service_id=conf["service_id"],
project_id=conf["project_id"],
resource=conf["resource"],
)
if conf
else ""
)
class DataprocMetastoreCreateBackupOperator(BaseOperator):
"""
Creates a new backup in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup: Required. The backup to create. The ``name`` field is ignored. The ID of the created
backup must be provided in the request's ``backup_id`` field.
This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this
should not be set.
:param backup_id: Required. The ID of the backup, which is used as the final component of the backup's
name. This value must be between 1 and 64 characters long, begin with a letter, end with a letter or
number, and consist of alphanumeric ASCII characters or hyphens.
This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
'project_id',
'backup',
'impersonation_chain',
)
template_fields_renderers = {'backup': 'json'}
operator_extra_links = (DataprocMetastoreDetailedLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
backup: Union[Dict, Backup],
backup_id: str,
request_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.backup = backup
self.backup_id = backup_id
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context") -> dict:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Creating Dataproc Metastore backup: %s", self.backup_id)
try:
operation = hook.create_backup(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
backup=self.backup,
backup_id=self.backup_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
backup = hook.wait_for_operation(self.timeout, operation)
self.log.info("Backup %s created successfully", self.backup_id)
except HttpError as err:
if err.resp.status not in (409, '409'):
raise
self.log.info("Backup %s already exists", self.backup_id)
backup = hook.get_backup(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
backup_id=self.backup_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocMetastoreDetailedLink.persist(
context=context, task_instance=self, url=METASTORE_BACKUP_LINK, resource=self.backup_id
)
return Backup.to_dict(backup)
class DataprocMetastoreCreateMetadataImportOperator(BaseOperator):
"""
Creates a new MetadataImport in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param metadata_import: Required. The metadata import to create. The ``name`` field is ignored. The ID of
the created metadata import must be provided in the request's ``metadata_import_id`` field.
This corresponds to the ``metadata_import`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param metadata_import_id: Required. The ID of the metadata import, which is used as the final component
of the metadata import's name. This value must be between 1 and 64 characters long, begin with a
letter, end with a letter or number, and consist of alphanumeric ASCII characters or hyphens.
This corresponds to the ``metadata_import_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
'project_id',
'metadata_import',
'impersonation_chain',
)
template_fields_renderers = {'metadata_import': 'json'}
operator_extra_links = (DataprocMetastoreDetailedLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
metadata_import: MetadataImport,
metadata_import_id: str,
request_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.metadata_import = metadata_import
self.metadata_import_id = metadata_import_id
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context"):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Creating Dataproc Metastore metadata import: %s", self.metadata_import_id)
operation = hook.create_metadata_import(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
metadata_import=self.metadata_import,
metadata_import_id=self.metadata_import_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
metadata_import = hook.wait_for_operation(self.timeout, operation)
self.log.info("Metadata import %s created successfully", self.metadata_import_id)
DataprocMetastoreDetailedLink.persist(
context=context, task_instance=self, url=METASTORE_IMPORT_LINK, resource=self.metadata_import_id
)
return MetadataImport.to_dict(metadata_import)
class DataprocMetastoreCreateServiceOperator(BaseOperator):
"""
Creates a metastore service in a project and location.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param service: Required. The Metastore service to create. The ``name`` field is ignored. The ID of
the created metastore service must be provided in the request's ``service_id`` field.
This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
'project_id',
'service',
'impersonation_chain',
)
template_fields_renderers = {'service': 'json'}
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
service: Union[Dict, Service],
service_id: str,
request_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.service = service
self.service_id = service_id
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context") -> dict:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Creating Dataproc Metastore service: %s", self.project_id)
try:
operation = hook.create_service(
region=self.region,
project_id=self.project_id,
service=self.service,
service_id=self.service_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
service = hook.wait_for_operation(self.timeout, operation)
self.log.info("Service %s created successfully", self.service_id)
except HttpError as err:
if err.resp.status not in (409, '409'):
raise
self.log.info("Instance %s already exists", self.service_id)
service = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
return Service.to_dict(service)
class DataprocMetastoreDeleteBackupOperator(BaseOperator):
"""
Deletes a single backup.
:param project_id: Required. The ID of the Google Cloud project that the backup belongs to.
:param region: Required. The ID of the Google Cloud region that the backup belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup_id: Required. The ID of the backup, which is used as the final component of the backup's
name. This value must be between 1 and 64 characters long, begin with a letter, end with a letter or
number, and consist of alphanumeric ASCII characters or hyphens.
This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
'project_id',
'impersonation_chain',
)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
backup_id: str,
request_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.backup_id = backup_id
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context") -> None:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Deleting Dataproc Metastore backup: %s", self.backup_id)
operation = hook.delete_backup(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
backup_id=self.backup_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(self.timeout, operation)
self.log.info("Backup %s deleted successfully", self.project_id)
class DataprocMetastoreDeleteServiceOperator(BaseOperator):
"""
Deletes a single service.
:param request: The request object. Request message for
[DataprocMetastore.DeleteService][google.cloud.metastore.v1.DataprocMetastore.DeleteService].
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id:
"""
template_fields: Sequence[str] = (
'project_id',
'impersonation_chain',
)
def __init__(
self,
*,
region: str,
project_id: str,
service_id: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.service_id = service_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context"):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Deleting Dataproc Metastore service: %s", self.project_id)
operation = hook.delete_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(self.timeout, operation)
self.log.info("Service %s deleted successfully", self.project_id)
class DataprocMetastoreExportMetadataOperator(BaseOperator):
"""
Exports metadata from a service.
:param destination_gcs_folder: A Cloud Storage URI of a folder, in the format
``gs://<bucket_name>/<path_inside_bucket>``. A sub-folder
``<export_folder>`` containing exported files will be
created below it.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
'project_id',
'impersonation_chain',
)
operator_extra_links = (DataprocMetastoreLink(), StorageLink())
def __init__(
self,
*,
destination_gcs_folder: str,
project_id: str,
region: str,
service_id: str,
request_id: Optional[str] = None,
database_dump_type: Optional[DatabaseDumpSpec] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.destination_gcs_folder = destination_gcs_folder
self.project_id = project_id
self.region = region
self.service_id = service_id
self.request_id = request_id
self.database_dump_type = database_dump_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context"):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Exporting metadata from Dataproc Metastore service: %s", self.service_id)
hook.export_metadata(
destination_gcs_folder=self.destination_gcs_folder,
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
request_id=self.request_id,
database_dump_type=self.database_dump_type,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
metadata_export = self._wait_for_export_metadata(hook)
self.log.info("Metadata from service %s exported successfully", self.service_id)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_EXPORT_LINK)
uri = self._get_uri_from_destination(MetadataExport.to_dict(metadata_export)["destination_gcs_uri"])
StorageLink.persist(context=context, task_instance=self, uri=uri)
return MetadataExport.to_dict(metadata_export)
def _get_uri_from_destination(self, destination_uri: str):
return destination_uri[5:] if destination_uri.startswith("gs://") else destination_uri
def _wait_for_export_metadata(self, hook: DataprocMetastoreHook):
"""
Workaround to check that export was created successfully.
We discovered a issue to parse result to MetadataExport inside the SDK
"""
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
sleep(time_to_wait)
service = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
activities: MetadataManagementActivity = service.metadata_management_activity
metadata_export: MetadataExport = activities.metadata_exports[0]
if metadata_export.state == MetadataExport.State.SUCCEEDED:
return metadata_export
if metadata_export.state == MetadataExport.State.FAILED:
raise AirflowException(
f"Exporting metadata from Dataproc Metastore {metadata_export.name} FAILED"
)
class DataprocMetastoreGetServiceOperator(BaseOperator):
"""
Gets the details of a single service.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
'project_id',
'impersonation_chain',
)
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
service_id: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.service_id = service_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context") -> dict:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Gets the details of a single Dataproc Metastore service: %s", self.project_id)
result = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
return Service.to_dict(result)
class DataprocMetastoreListBackupsOperator(BaseOperator):
"""
Lists backups in a service.
:param project_id: Required. The ID of the Google Cloud project that the backup belongs to.
:param region: Required. The ID of the Google Cloud region that the backup belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
'project_id',
'impersonation_chain',
)
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
page_size: Optional[int] = None,
page_token: Optional[str] = None,
filter: Optional[str] = None,
order_by: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.page_size = page_size
self.page_token = page_token
self.filter = filter
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context") -> List[dict]:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Listing Dataproc Metastore backups: %s", self.service_id)
backups = hook.list_backups(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
page_size=self.page_size,
page_token=self.page_token,
filter=self.filter,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_BACKUPS_LINK)
return [Backup.to_dict(backup) for backup in backups]
class DataprocMetastoreRestoreServiceOperator(BaseOperator):
"""
Restores a service from a backup.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup_project_id: Required. The ID of the Google Cloud project that the metastore
service backup to restore from.
:param backup_region: Required. The ID of the Google Cloud region that the metastore
service backup to restore from.
:param backup_service_id: Required. The ID of the metastore service backup to restore from, which is
used as the final component of the metastore service's name. This value must be between 2 and 63
characters long inclusive, begin with a letter, end with a letter or number, and consist
of alphanumeric ASCII characters or hyphens.
:param backup_id: Required. The ID of the metastore service backup to restore from
:param restore_type: Optional. The type of restore. If unspecified, defaults to
``METADATA_ONLY``
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
'project_id',
'impersonation_chain',
)
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
backup_project_id: str,
backup_region: str,
backup_service_id: str,
backup_id: str,
restore_type: Optional[Restore] = None,
request_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.backup_project_id = backup_project_id
self.backup_region = backup_region
self.backup_service_id = backup_service_id
self.backup_id = backup_id
self.restore_type = restore_type
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context"):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info(
"Restoring Dataproc Metastore service: %s from backup: %s", self.service_id, self.backup_id
)
hook.restore_service(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
backup_project_id=self.backup_project_id,
backup_region=self.backup_region,
backup_service_id=self.backup_service_id,
backup_id=self.backup_id,
restore_type=self.restore_type,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self._wait_for_restore_service(hook)
self.log.info("Service %s restored from backup %s", self.service_id, self.backup_id)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
def _wait_for_restore_service(self, hook: DataprocMetastoreHook):
"""
Workaround to check that restore service was finished successfully.
We discovered an issue to parse result to Restore inside the SDK
"""
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
sleep(time_to_wait)
service = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
activities: MetadataManagementActivity = service.metadata_management_activity
restore_service: Restore = activities.restores[0]
if restore_service.state == Restore.State.SUCCEEDED:
return restore_service
if restore_service.state == Restore.State.FAILED:
raise AirflowException("Restoring service FAILED")
class DataprocMetastoreUpdateServiceOperator(BaseOperator):
"""
Updates the parameters of a single service.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param service: Required. The metastore service to update. The server only merges fields in the service
if they are specified in ``update_mask``.
The metastore service's ``name`` field is used to identify the metastore service to be updated.
This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param update_mask: Required. A field mask used to specify the fields to be overwritten in the metastore
service resource by the update. Fields specified in the ``update_mask`` are relative to the resource
(not to the full request). A field is overwritten if it is in the mask.
This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
'project_id',
'impersonation_chain',
)
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
service: Union[Dict, Service],
update_mask: FieldMask,
request_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.service = service
self.update_mask = update_mask
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: "Context"):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Updating Dataproc Metastore service: %s", self.service.get("name"))
operation = hook.update_service(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
service=self.service,
update_mask=self.update_mask,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(self.timeout, operation)
self.log.info("Service %s updated successfully", self.service.get("name"))
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
|
{
"content_hash": "25d8ccf5e77dc0a87b01e9d6ed0c3535",
"timestamp": "",
"source": "github",
"line_count": 1108,
"max_line_length": 110,
"avg_line_length": 44.70036101083033,
"alnum_prop": 0.6470481343886286,
"repo_name": "lyft/incubator-airflow",
"id": "d0ca4a5f28672f055a3750b0f68868468f782d36",
"size": "50317",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/operators/dataproc_metastore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
}
|
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
from c7n_azure.actions.base import AzureBaseAction
from c7n.utils import type_schema
@resources.register('hdinsight')
class Hdinsight(ArmResourceManager):
"""HDInsight Resource
:example:
Finds all Hadoop HDInsight Clusters
.. code-block:: yaml
policies:
- name: hdinsight-policy
resource: azure.hdinsight
filters:
- type: value
key: properties.clusterDefinition.kind
value_type: normalize
value: hadoop
:example:
Finds all HDInsight Clusters with 3 worker nodes
.. code-block:: yaml
policies:
- name: hdinsight-policy
resource: azure.hdinsight
filters:
- type: value
key: properties.computeProfile.roles[?name=='workernode'].targetInstanceCount | [0]
op: eq
value_type: integer
value: 3
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Analytics']
service = 'azure.mgmt.hdinsight'
client = 'HDInsightManagementClient'
enum_spec = ('clusters', 'list', None)
default_report_fields = (
'name',
'location',
'resourceGroup',
'properties.clusterDefinition.kind',
'properties.tier'
)
resource_type = 'Microsoft.HDInsight/clusters'
@Hdinsight.action_registry.register('resize')
class Resize(AzureBaseAction):
"""
Action to scale HDInsight Clusters
:example:
This policy will resize the cluster to 4 nodes
.. code-block:: yaml
policies:
- name: resize-hdinsight
resource: azure.hdinsight
filters:
- type: value
key: name
value: cctesthdinsight
actions:
- type: resize
count: 4
"""
schema = type_schema(
'resize',
required=['count'],
**{
'count': {'type': 'integer', 'minimum': 1}
})
def _prepare_processing(self):
self.client = self.manager.get_client()
def _process_resource(self, cluster):
self.client.clusters.resize(
cluster['resourceGroup'],
cluster['name'],
target_instance_count=self.data['count']
)
|
{
"content_hash": "ff50f5ee1c6092e42c9d169c8256f707",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 99,
"avg_line_length": 24.89,
"alnum_prop": 0.5600642828445158,
"repo_name": "Sutto/cloud-custodian",
"id": "89252a884855de5d29babcfd32a9ba26b018559d",
"size": "3071",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/c7n_azure/resources/hdinsight.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5283859"
},
{
"name": "Shell",
"bytes": "12627"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
def solution(S):
# write your code in Python 2.7
maximum = 0
S = S.replace('?','.').replace('!','.')
sentences = S.split('.')
# print(sentences)
lengthOfWords = []
for sent in sentences:
sent = sent.strip().split()
lengthOfWords.append(len(sent))
return max(lengthOfWords)
print solution('Forget CVs..Save time . x x')
|
{
"content_hash": "6c5061cef478790bf73eddef64925a33",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 46,
"avg_line_length": 28.307692307692307,
"alnum_prop": 0.5896739130434783,
"repo_name": "saisankargochhayat/algo_quest",
"id": "9d1b5aa1aa03b74e64269d58acccef471a903c7f",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codecontrol/first.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "405"
},
{
"name": "C++",
"bytes": "9149"
},
{
"name": "HTML",
"bytes": "1679"
},
{
"name": "Java",
"bytes": "3648"
},
{
"name": "JavaScript",
"bytes": "786"
},
{
"name": "Python",
"bytes": "248621"
},
{
"name": "Ruby",
"bytes": "2761"
},
{
"name": "Shell",
"bytes": "610"
}
],
"symlink_target": ""
}
|
from typing import Dict, List, Iterable, Tuple, Any, Iterator
import scipy.sparse as sparse
import numpy as np
import loompy
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import h5py
from .utils import compare_loom_spec_version
class GlobalAttributeManager(object):
def __init__(self, f: h5py.File) -> None:
setattr(self, "!f", f)
storage: Dict[str, str] = {}
setattr(self, "!storage", storage)
if "attrs" not in self.f:
for key, val in f.attrs.items():
materialized = loompy.materialize_attr_values(val)
self.__dict__["storage"][key] = materialized
else:
for key, val in f["attrs"].items():
materialized = loompy.materialize_attr_values(val[()])
self.__dict__["storage"][key] = materialized
def keys(self) -> List[str]:
return list(self.__dict__["storage"].keys())
def items(self) -> Iterable[Tuple[str, sparse.coo_matrix]]:
for key in self.keys():
yield (key, self[key])
def __len__(self) -> int:
return len(self.keys())
def __contains__(self, name: str) -> bool:
return name in self.keys()
def __iter__(self) -> Iterator[str]:
for key in self.keys():
yield key
def __getitem__(self, thing: Any) -> np.ndarray:
return self.__getattr__(thing)
def __getattr__(self, name: str) -> np.ndarray:
try:
return self.__dict__["storage"][name]
except KeyError:
if self.f is not None:
if loompy.compare_loom_spec_version(self.f, "3.0.0") < 0:
if name in self.f.attrs:
val = self.f.attrs[name]
else:
raise AttributeError(f"File has no global attribute '{name}'")
else:
if name in self.f["attrs"]:
val = self.f["attrs"][name]
else:
raise AttributeError(f"File has no global attribute '{name}'")
materialized = loompy.materialize_attr_values(val)
self.__dict__["storage"][name] = materialized
return materialized
def __setitem__(self, name: str, val: Any) -> None:
return self.__setattr__(name, val)
def __setattr__(self, name: str, val: Any) -> None:
if name.startswith("!"):
super(GlobalAttributeManager, self).__setattr__(name[1:], val)
elif "/" in name:
raise KeyError("Attribute name cannot contain slash (/)")
else:
if self.f is not None:
if loompy.compare_loom_spec_version(self.f, "3.0.0") < 0 and "attrs" not in self.f["/"]:
normalized = loompy.normalize_attr_values(val, False)
self.f.attrs[name] = normalized
self.f.flush()
val = self.f.attrs[name]
# Read it back in to ensure it's synced and normalized
normalized = loompy.materialize_attr_values(val)
self.__dict__["storage"][name] = normalized
else:
normalized = loompy.normalize_attr_values(val, True)
if name in self.f["attrs"]:
del self.f["attrs"][name]
if not np.isscalar(normalized) and normalized.dtype == np.object_:
self.ds._file.create_dataset("attrs/" + name, data=normalized, dtype=h5py.special_dtype(vlen=str))
else:
self.f["attrs"][name] = normalized
self.f.flush()
val = self.f["attrs"][name][()]
# Read it back in to ensure it's synced and normalized
normalized = loompy.materialize_attr_values(val)
self.__dict__["storage"][name] = normalized
def __delattr__(self, name: str) -> None:
if name.startswith("!"):
super(GlobalAttributeManager, self).__delattr__(name[1:])
else:
if self.f is not None:
if loompy.compare_loom_spec_version(self.f, "3.0.0") < 0:
if name in self.f.attrs:
del self.f.attrs[name]
else:
if name in self.f["attrs"]:
del self.f["attrs"][name]
del self.__dict__["storage"][name]
def get(self, name: str, default: Any = None) -> np.ndarray:
"""
Return the value for a named attribute if it exists, else default.
If default is not given, it defaults to None, so that this method never raises a KeyError.
"""
if name in self:
return self[name]
else:
return default
|
{
"content_hash": "59b2cd6fb6103469f3d9587bf3a39978",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 104,
"avg_line_length": 33.19491525423729,
"alnum_prop": 0.6451365841205003,
"repo_name": "linnarsson-lab/loompy",
"id": "3d336ecde2b830c728d667b62b13907a90553b2d",
"size": "3917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loompy/global_attribute_manager.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "22975"
},
{
"name": "Python",
"bytes": "171759"
},
{
"name": "Shell",
"bytes": "3354"
}
],
"symlink_target": ""
}
|
"""
This module contains the main interface to the botocore package, the
Session object.
"""
import copy
import logging
import os
import platform
from botocore import __version__
import botocore.config
import botocore.credentials
import botocore.client
from botocore.exceptions import ConfigNotFound, ProfileNotFound
from botocore import handlers
from botocore.hooks import HierarchicalEmitter, first_non_none_response
from botocore.loaders import create_loader
from botocore.parsers import ResponseParserFactory
from botocore import regions
from botocore.model import ServiceModel
from botocore import paginate
from botocore import waiter
from botocore import retryhandler, translate
class Session(object):
"""
The Session object collects together useful functionality
from `botocore` as well as important data such as configuration
information and credentials into a single, easy-to-use object.
:ivar available_profiles: A list of profiles defined in the config
file associated with this session.
:ivar profile: The current profile.
"""
#: A default dictionary that maps the logical names for session variables
#: to the specific environment variables and configuration file names
#: that contain the values for these variables.
#: When creating a new Session object, you can pass in your own dictionary
#: to remap the logical names or to add new logical names. You can then
#: get the current value for these variables by using the
#: ``get_config_variable`` method of the :class:`botocore.session.Session`
#: class.
#: These form the keys of the dictionary. The values in the dictionary
#: are tuples of (<config_name>, <environment variable>, <default value>,
#: <conversion func>).
#: The conversion func is a function that takes the configuration value
#: as an argument and returns the converted value. If this value is
#: None, then the configuration value is returned unmodified. This
#: conversion function can be used to type convert config values to
#: values other than the default values of strings.
#: The ``profile`` and ``config_file`` variables should always have a
#: None value for the first entry in the tuple because it doesn't make
#: sense to look inside the config file for the location of the config
#: file or for the default profile to use.
#: The ``config_name`` is the name to look for in the configuration file,
#: the ``env var`` is the OS environment variable (``os.environ``) to
#: use, and ``default_value`` is the value to use if no value is otherwise
#: found.
SESSION_VARIABLES = {
# logical: config_file, env_var, default_value, conversion_func
'profile': (None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None),
'region': ('region', 'AWS_DEFAULT_REGION', None, None),
'data_path': ('data_path', 'AWS_DATA_PATH', None, None),
'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config', None),
'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),
# This is the shared credentials file amongst sdks.
'credentials_file': (None, 'AWS_SHARED_CREDENTIALS_FILE',
'~/.aws/credentials', None),
# These variables only exist in the config file.
# This is the number of seconds until we time out a request to
# the instance metadata service.
'metadata_service_timeout': ('metadata_service_timeout',
'AWS_METADATA_SERVICE_TIMEOUT', 1, int),
# This is the number of request attempts we make until we give
# up trying to retrieve data from the instance metadata service.
'metadata_service_num_attempts': ('metadata_service_num_attempts',
'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int),
}
#: The default format string to use when configuring the botocore logger.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, session_vars=None, event_hooks=None,
include_builtin_handlers=True, profile=None):
"""
Create a new Session object.
:type session_vars: dict
:param session_vars: A dictionary that is used to override some or all
of the environment variables associated with this session. The
key/value pairs defined in this dictionary will override the
corresponding variables defined in ``SESSION_VARIABLES``.
:type event_hooks: BaseEventHooks
:param event_hooks: The event hooks object to use. If one is not
provided, an event hooks object will be automatically created
for you.
:type include_builtin_handlers: bool
:param include_builtin_handlers: Indicates whether or not to
automatically register builtin handlers.
:type profile: str
:param profile: The name of the profile to use for this
session. Note that the profile can only be set when
the session is created.
"""
self.session_var_map = copy.copy(self.SESSION_VARIABLES)
if session_vars:
self.session_var_map.update(session_vars)
if event_hooks is None:
self._events = HierarchicalEmitter()
else:
self._events = event_hooks
if include_builtin_handlers:
self._register_builtin_handlers(self._events)
self.user_agent_name = 'Botocore'
self.user_agent_version = __version__
self.user_agent_extra = ''
# The _profile attribute is just used to cache the value
# of the current profile to avoid going through the normal
# config lookup process each access time.
self._profile = None
self._config = None
self._credentials = None
self._profile_map = None
# This is a dict that stores per session specific config variable
# overrides via set_config_variable().
self._session_instance_vars = {}
if profile is not None:
self._session_instance_vars['profile'] = profile
self._client_config = None
self._components = ComponentLocator()
self._register_components()
def _register_components(self):
self._register_credential_provider()
self._register_data_loader()
self._register_endpoint_resolver()
self._register_event_emitter()
self._register_response_parser_factory()
def _register_event_emitter(self):
self._components.register_component('event_emitter', self._events)
def _register_credential_provider(self):
self._components.lazy_register_component(
'credential_provider',
lambda: botocore.credentials.create_credential_resolver(self))
def _register_data_loader(self):
self._components.lazy_register_component(
'data_loader',
lambda: create_loader(self.get_config_variable('data_path')))
def _register_endpoint_resolver(self):
self._components.lazy_register_component(
'endpoint_resolver',
lambda: regions.EndpointResolver(self.get_data('_endpoints')))
def _register_response_parser_factory(self):
self._components.register_component('response_parser_factory',
ResponseParserFactory())
def _register_builtin_handlers(self, events):
for spec in handlers.BUILTIN_HANDLERS:
if len(spec) == 2:
event_name, handler = spec
self.register(event_name, handler)
else:
event_name, handler, register_type = spec
if register_type is handlers.REGISTER_FIRST:
self._events.register_first(event_name, handler)
elif register_type is handlers.REGISTER_LAST:
self._events.register_last(event_name, handler)
@property
def available_profiles(self):
return list(self._build_profile_map().keys())
def _build_profile_map(self):
# This will build the profile map if it has not been created,
# otherwise it will return the cached value. The profile map
# is a list of profile names, to the config values for the profile.
if self._profile_map is None:
self._profile_map = self.full_config['profiles']
return self._profile_map
@property
def profile(self):
if self._profile is None:
profile = self.get_config_variable('profile')
self._profile = profile
return self._profile
def get_config_variable(self, logical_name,
methods=('instance', 'env', 'config')):
"""
Retrieve the value associated with the specified logical_name
from the environment or the config file. Values found in the
environment variable take precedence of values found in the
config file. If no value can be found, a None will be returned.
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to retrieve. This name will be mapped to the
appropriate environment variable name for this session as
well as the appropriate config file entry.
:type method: tuple
:param method: Defines which methods will be used to find
the variable value. By default, all available methods
are tried but you can limit which methods are used
by supplying a different value to this parameter.
Valid choices are: instance|env|config
:returns: value of variable or None if not defined.
"""
# Handle all the short circuit special cases first.
if logical_name not in self.session_var_map:
return
# Do the actual lookups. We need to handle
# 'instance', 'env', and 'config' locations, in that order.
value = None
var_config = self.session_var_map[logical_name]
if self._found_in_instance_vars(methods, logical_name):
return self._session_instance_vars[logical_name]
elif self._found_in_env(methods, var_config):
value = self._retrieve_from_env(var_config[1], os.environ)
elif self._found_in_config_file(methods, var_config):
value = self.get_scoped_config()[var_config[0]]
if value is None:
value = var_config[2]
if var_config[3] is not None:
value = var_config[3](value)
return value
def _found_in_instance_vars(self, methods, logical_name):
if 'instance' in methods:
return logical_name in self._session_instance_vars
return False
def _found_in_env(self, methods, var_config):
return (
'env' in methods and
var_config[1] is not None and
self._retrieve_from_env(var_config[1], os.environ) is not None)
def _found_in_config_file(self, methods, var_config):
if 'config' in methods and var_config[0] is not None:
return var_config[0] in self.get_scoped_config()
return False
def _retrieve_from_env(self, names, environ):
# We need to handle the case where names is either
# a single value or a list of variables.
if not isinstance(names, list):
names = [names]
for name in names:
if name in environ:
return environ[name]
return None
def set_config_variable(self, logical_name, value):
"""Set a configuration variable to a specific value.
By using this method, you can override the normal lookup
process used in ``get_config_variable`` by explicitly setting
a value. Subsequent calls to ``get_config_variable`` will
use the ``value``. This gives you per-session specific
configuration values.
::
>>> # Assume logical name 'foo' maps to env var 'FOO'
>>> os.environ['FOO'] = 'myvalue'
>>> s.get_config_variable('foo')
'myvalue'
>>> s.set_config_variable('foo', 'othervalue')
>>> s.get_config_variable('foo')
'othervalue'
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to set. These are the keys in ``SESSION_VARIABLES``.
:param value: The value to associate with the config variable.
"""
self._session_instance_vars[logical_name] = value
def get_scoped_config(self):
"""
Returns the config values from the config file scoped to the current
profile.
The configuration data is loaded **only** from the config file.
It does not resolve variables based on different locations
(e.g. first from the session instance, then from environment
variables, then from the config file). If you want this lookup
behavior, use the ``get_config_variable`` method instead.
Note that this configuration is specific to a single profile (the
``profile`` session variable).
If the ``profile`` session variable is set and the profile does
not exist in the config file, a ``ProfileNotFound`` exception
will be raised.
:raises: ConfigNotFound, ConfigParseError, ProfileNotFound
:rtype: dict
"""
profile_name = self.get_config_variable('profile')
profile_map = self._build_profile_map()
# If a profile is not explicitly set return the default
# profile config or an empty config dict if we don't have
# a default profile.
if profile_name is None:
return profile_map.get('default', {})
elif profile_name not in profile_map:
# Otherwise if they specified a profile, it has to
# exist (even if it's the default profile) otherwise
# we complain.
raise ProfileNotFound(profile=profile_name)
else:
return profile_map[profile_name]
@property
def full_config(self):
"""Return the parsed config file.
The ``get_config`` method returns the config associated with the
specified profile. This property returns the contents of the
**entire** config file.
:rtype: dict
"""
if self._config is None:
try:
config_file = self.get_config_variable('config_file')
self._config = botocore.config.load_config(config_file)
except ConfigNotFound:
self._config = {'profiles': {}}
try:
# Now we need to inject the profiles from the
# credentials file. We don't actually need the values
# in the creds file, only the profile names so that we
# can validate the user is not referring to a nonexistent
# profile.
cred_file = self.get_config_variable('credentials_file')
cred_profiles = botocore.config.raw_config_parse(cred_file)
for profile in cred_profiles:
cred_vars = cred_profiles[profile]
if profile not in self._config['profiles']:
self._config['profiles'][profile] = cred_vars
else:
self._config['profiles'][profile].update(cred_vars)
except ConfigNotFound:
pass
return self._config
def get_default_client_config(self):
"""Retrieves the default config for creating clients
:rtype: botocore.client.Config
:returns: The default client config object when creating clients. If
the value is ``None`` then there is no default config object
attached to the session.
"""
return self._client_config
def set_default_client_config(self, client_config):
"""Sets the default config for creating clients
:type client_config: botocore.client.Config
:param client_config: The default client config object when creating
clients. If the value is ``None`` then there is no default config
object attached to the session.
"""
self._client_config = client_config
def set_credentials(self, access_key, secret_key, token=None):
"""
Manually create credentials for this session. If you would
prefer to use botocore without a config file, environment variables,
or IAM roles, you can pass explicit credentials into this
method to establish credentials for this session.
:type access_key: str
:param access_key: The access key part of the credentials.
:type secret_key: str
:param secret_key: The secret key part of the credentials.
:type token: str
:param token: An option session token used by STS session
credentials.
"""
self._credentials = botocore.credentials.Credentials(access_key,
secret_key,
token)
def get_credentials(self):
"""
Return the :class:`botocore.credential.Credential` object
associated with this session. If the credentials have not
yet been loaded, this will attempt to load them. If they
have already been loaded, this will return the cached
credentials.
"""
if self._credentials is None:
self._credentials = self._components.get_component(
'credential_provider').load_credentials()
return self._credentials
def user_agent(self):
"""
Return a string suitable for use as a User-Agent header.
The string will be of the form:
<agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver>
Where:
- agent_name is the value of the `user_agent_name` attribute
of the session object (`Boto` by default).
- agent_version is the value of the `user_agent_version`
attribute of the session object (the botocore version by default).
by default.
- py_ver is the version of the Python interpreter beng used.
- plat_name is the name of the platform (e.g. Darwin)
- plat_ver is the version of the platform
If ``user_agent_extra`` is not empty, then this value will be
appended to the end of the user agent string.
"""
base = '%s/%s Python/%s %s/%s' % (self.user_agent_name,
self.user_agent_version,
platform.python_version(),
platform.system(),
platform.release())
if self.user_agent_extra:
base += ' %s' % self.user_agent_extra
return base
def get_data(self, data_path):
"""
Retrieve the data associated with `data_path`.
:type data_path: str
:param data_path: The path to the data you wish to retrieve.
"""
return self.get_component('data_loader').load_data(data_path)
def get_service_model(self, service_name, api_version=None):
"""Get the service model object.
:type service_name: string
:param service_name: The service name
:type api_version: string
:param api_version: The API version of the service. If none is
provided, then the latest API version will be used.
:rtype: L{botocore.model.ServiceModel}
:return: The botocore service model for the service.
"""
service_description = self.get_service_data(service_name, api_version)
return ServiceModel(service_description, service_name=service_name)
def get_waiter_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
waiter_config = loader.load_service_model(
service_name, 'waiters-2', api_version)
return waiter.WaiterModel(waiter_config)
def get_paginator_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
paginator_config = loader.load_service_model(
service_name, 'paginators-1', api_version)
return paginate.PaginatorModel(paginator_config)
def get_service_data(self, service_name, api_version=None):
"""
Retrieve the fully merged data associated with a service.
"""
data_path = service_name
service_data = self.get_component('data_loader').load_service_model(
data_path,
type_name='service-2',
api_version=api_version
)
self._events.emit('service-data-loaded.%s' % service_name,
service_data=service_data,
service_name=service_name, session=self)
return service_data
def get_available_services(self):
"""
Return a list of names of available services.
"""
return self.get_component('data_loader')\
.list_available_services(type_name='service-2')
def set_debug_logger(self, logger_name='botocore'):
"""
Convenience function to quickly configure full debug output
to go to the console.
"""
self.set_stream_logger(logger_name, logging.DEBUG)
def set_stream_logger(self, logger_name, log_level, stream=None,
format_string=None):
"""
Convenience method to configure a stream logger.
:type logger_name: str
:param logger_name: The name of the logger to configure
:type log_level: str
:param log_level: The log level to set for the logger. This
is any param supported by the ``.setLevel()`` method of
a ``Log`` object.
:type stream: file
:param stream: A file like object to log to. If none is provided
then sys.stderr will be used.
:type format_string: str
:param format_string: The format string to use for the log
formatter. If none is provided this will default to
``self.LOG_FORMAT``.
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream)
ch.setLevel(log_level)
# create formatter
if format_string is None:
format_string = self.LOG_FORMAT
formatter = logging.Formatter(format_string)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def set_file_logger(self, log_level, path, logger_name='botocore'):
"""
Convenience function to quickly configure any level of logging
to a file.
:type log_level: int
:param log_level: A log level as specified in the `logging` module
:type path: string
:param path: Path to the log file. The file will be created
if it doesn't already exist.
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.FileHandler(path)
ch.setLevel(log_level)
# create formatter
formatter = logging.Formatter(self.LOG_FORMAT)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to invoke when the event
is emitted. This object must be callable, and must
accept ``**kwargs``. If either of these preconditions are
not met, a ``ValueError`` will be raised.
:type unique_id: str
:param unique_id: An optional identifier to associate with the
registration. A unique_id can only be used once for
the entire session registration (unless it is unregistered).
This can be used to prevent an event handler from being
registered twice.
:param unique_id_uses_count: boolean
:param unique_id_uses_count: Specifies if the event should maintain
a count when a ``unique_id`` is registered and unregisted. The
event can only be completely unregistered once every register call
using the unique id has been matched by an ``unregister`` call.
If ``unique_id`` is specified, subsequent ``register``
calls must use the same value for ``unique_id_uses_count``
as the ``register`` call that first registered the event.
:raises ValueError: If the call to ``register`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.register(event_name, handler, unique_id,
unique_id_uses_count=unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
"""Unregister a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to unregister.
:type unique_id: str
:param unique_id: A unique identifier identifying the callback
to unregister. You can provide either the handler or the
unique_id, you do not have to provide both.
:param unique_id_uses_count: boolean
:param unique_id_uses_count: Specifies if the event should maintain
a count when a ``unique_id`` is registered and unregisted. The
event can only be completely unregistered once every ``register``
call using the ``unique_id`` has been matched by an ``unregister``
call. If the ``unique_id`` is specified, subsequent
``unregister`` calls must use the same value for
``unique_id_uses_count`` as the ``register`` call that first
registered the event.
:raises ValueError: If the call to ``unregister`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.unregister(event_name, handler=handler,
unique_id=unique_id,
unique_id_uses_count=unique_id_uses_count)
def emit(self, event_name, **kwargs):
return self._events.emit(event_name, **kwargs)
def emit_first_non_none_response(self, event_name, **kwargs):
responses = self._events.emit(event_name, **kwargs)
return first_non_none_response(responses)
def get_component(self, name):
return self._components.get_component(name)
def register_component(self, name, component):
self._components.register_component(name, component)
def lazy_register_component(self, name, component):
self._components.lazy_register_component(name, component)
def create_client(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
"""Create a botocore client.
:type service_name: string
:param service_name: The name of the service for which a client will
be created. You can use the ``Sesssion.get_available_services()``
method to get a list of all available service names.
:type region_name: string
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:type api_version: string
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
uses. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:type aws_secret_access_key: string
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:type aws_session_token: string
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type config: botocore.client.Config
:param config: Advanced client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
:rtype: botocore.client.BaseClient
:return: A botocore client instance
"""
default_client_config = self.get_default_client_config()
# If a config is provided and a default config is set, then
# use the config resulting from merging the two.
if config is not None and default_client_config is not None:
config = default_client_config.merge(config)
# If a config was not provided then use the default
# client config from the session
elif default_client_config is not None:
config = default_client_config
if region_name is None:
if config and config.region_name is not None:
region_name = config.region_name
else:
region_name = self.get_config_variable('region')
loader = self.get_component('data_loader')
event_emitter = self.get_component('event_emitter')
response_parser_factory = self.get_component(
'response_parser_factory')
if aws_secret_access_key is not None:
credentials = botocore.credentials.Credentials(
access_key=aws_access_key_id,
secret_key=aws_secret_access_key,
token=aws_session_token)
else:
credentials = self.get_credentials()
endpoint_resolver = self.get_component('endpoint_resolver')
client_creator = botocore.client.ClientCreator(
loader, endpoint_resolver, self.user_agent(), event_emitter,
retryhandler, translate, response_parser_factory)
client = client_creator.create_client(
service_name, region_name, use_ssl, endpoint_url, verify,
credentials, scoped_config=self.get_scoped_config(),
client_config=config, api_version=api_version)
return client
class ComponentLocator(object):
"""Service locator for session components."""
def __init__(self):
self._components = {}
self._deferred = {}
def get_component(self, name):
if name in self._deferred:
factory = self._deferred[name]
self._components[name] = factory()
# Only delete the component from the deferred dict after
# successfully creating the object from the factory as well as
# injecting the instantiated value into the _components dict.
del self._deferred[name]
try:
return self._components[name]
except KeyError:
raise ValueError("Unknown component: %s" % name)
def register_component(self, name, component):
self._components[name] = component
try:
del self._deferred[name]
except KeyError:
pass
def lazy_register_component(self, name, no_arg_factory):
self._deferred[name] = no_arg_factory
try:
del self._components[name]
except KeyError:
pass
def get_session(env_vars=None):
"""
Return a new session object.
"""
return Session(env_vars)
|
{
"content_hash": "0db7c4eaf48fb17fb487be6120e7d1ac",
"timestamp": "",
"source": "github",
"line_count": 825,
"max_line_length": 87,
"avg_line_length": 41.89575757575758,
"alnum_prop": 0.6193727577826641,
"repo_name": "mnahm5/django-estore",
"id": "6f91652ec5a4367e22b008e944d53535cd33c052",
"size": "35190",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lib/site-packages/botocore/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1351"
},
{
"name": "Batchfile",
"bytes": "2695"
},
{
"name": "C",
"bytes": "460931"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "144496"
},
{
"name": "HTML",
"bytes": "155544"
},
{
"name": "JavaScript",
"bytes": "206799"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "24837167"
},
{
"name": "Shell",
"bytes": "4408"
},
{
"name": "Tcl",
"bytes": "1237789"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
}
|
"""Manage class and methods for sessions."""
import logging
import pandas
import utils
# Load logging configuration
log = logging.getLogger(__name__)
class Session:
"""Session class."""
def update_session_status(self, authorization: str, session_id: int, session_status: str):
"""Update a session status."""
query = 'mutation updateSessionStatus($id: Int!, $sessionPatch: SessionPatch!){updateSessionById(input:{id: $id, sessionPatch: $sessionPatch}){session{status}}}'
variables = {'id': session_id, 'sessionPatch': {'status': session_status}}
payload = {'query': query, 'variables': variables}
response = utils.execute_graphql_request(authorization, payload)
return response
def compute_session_result(self, authorization: str, session_id: int, alert_operator: str, alert_threshold: str, result_data: pandas.DataFrame):
"""Compute aggregated results for the indicator session."""
log.info('Compute session results.')
nb_records = len(result_data)
nb_records_alert = len(result_data.loc[result_data['Alert'] == True]) # pylint: disable=C0121
nb_records_no_alert = len(result_data.loc[result_data['Alert'] == False]) # pylint: disable=C0121
# Post results to database
query = 'mutation updateSessionResults($id: Int!, $sessionPatch: SessionPatch!){updateSessionById(input:{id: $id, sessionPatch: $sessionPatch}){session{id}}}'
variables = {}
variables['id'] = session_id
variables['sessionPatch'] = {}
variables['sessionPatch']['alertOperator'] = alert_operator
variables['sessionPatch']['alertThreshold'] = float(alert_threshold) # Alert threshold is stored as string in parameters and needs to be converted to float
variables['sessionPatch']['nbRecordsNoAlert'] = nb_records_no_alert
variables['sessionPatch']['nbRecordsAlert'] = nb_records_alert
variables['sessionPatch']['nbRecords'] = nb_records
payload = {'query': query, 'variables': variables}
utils.execute_graphql_request(authorization, payload)
return nb_records_alert
|
{
"content_hash": "ecdd80bfdebcb47a3402107b2e2835f4",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 169,
"avg_line_length": 48.75,
"alnum_prop": 0.6797202797202797,
"repo_name": "alexisrolland/data-quality",
"id": "fb5670f11aa431378fa5a2731f31ca8cd4632799",
"size": "2145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/init/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63"
},
{
"name": "Dockerfile",
"bytes": "2426"
},
{
"name": "HTML",
"bytes": "3999"
},
{
"name": "JavaScript",
"bytes": "40173"
},
{
"name": "PLpgSQL",
"bytes": "10320"
},
{
"name": "Python",
"bytes": "80510"
},
{
"name": "Shell",
"bytes": "26835"
}
],
"symlink_target": ""
}
|
from .euler_flat_earth import EulerFlatEarth
|
{
"content_hash": "752d25eb434f0cc55b47ad01955b7346",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 44,
"avg_line_length": 45,
"alnum_prop": 0.8444444444444444,
"repo_name": "AeroPython/PyFME",
"id": "2f1951d20928452a0bfaacf59c129a3ead02d646",
"size": "45",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/pyfme/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "190916"
}
],
"symlink_target": ""
}
|
from common import * # nopep8
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'onadata_test',
'USER': 'postgres',
'PASSWORD': '',
'HOST': '127.0.0.1',
'OPTIONS': {
# note: this option obsolete starting with django 1.6
'autocommit': True,
}
}
}
SECRET_KEY = 'mlfs33^s1l4xf6a36$0#j%dd*sisfoi&)&4s-v=91#^l01v)*j'
if len(sys.argv) >= 2 and (sys.argv[1] == "test" or sys.argv[1] == "test_all"):
# This trick works only when we run tests from the command line.
TESTING_MODE = True
else:
TESTING_MODE = False
if TESTING_MODE:
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'test_media/')
subprocess.call(["rm", "-r", MEDIA_ROOT])
MONGO_DATABASE['NAME'] = "formhub_test"
# need to have CELERY_ALWAYS_EAGER True and BROKER_BACKEND as memory
# to run tasks immediately while testing
CELERY_ALWAYS_EAGER = True
BROKER_BACKEND = 'memory'
ENKETO_API_TOKEN = 'abc'
else:
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media/')
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'onadata.libs.utils.middleware.HTTPResponseNotAllowedMiddleware',
)
|
{
"content_hash": "18ed368030345dacac73210723a40223",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 32.32692307692308,
"alnum_prop": 0.6650803093396788,
"repo_name": "jnordling/cabin",
"id": "58e9630992b2626f5a3acb23e159594815ad9eea",
"size": "1738",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "onadata/settings/travis_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "HTML",
"bytes": "248525"
},
{
"name": "JavaScript",
"bytes": "904742"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "2813604"
},
{
"name": "Shell",
"bytes": "14149"
}
],
"symlink_target": ""
}
|
from .server import Server
from .client import Client
|
{
"content_hash": "40ed6eded3bdc50185b7a66c4b970680",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 26,
"avg_line_length": 26.5,
"alnum_prop": 0.8301886792452831,
"repo_name": "baverman/jeque",
"id": "e5cdf2872589877a5bbbaa5e4185f9563a114aa7",
"size": "53",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jeque/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16857"
}
],
"symlink_target": ""
}
|
'''
@author: shibkov
'''
from actions import SlavePack
|
{
"content_hash": "4737e88397a2963e6f9ab764e2192dee",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 29,
"avg_line_length": 11.2,
"alnum_prop": 0.6964285714285714,
"repo_name": "barsgroup/objectpack",
"id": "0a12b27ce4474d3286b7a4d7215c0e9d0acdbcdf",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/objectpack/slave_object_pack/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1132"
},
{
"name": "HTML",
"bytes": "13501"
},
{
"name": "JavaScript",
"bytes": "35443"
},
{
"name": "Python",
"bytes": "220784"
}
],
"symlink_target": ""
}
|
from typing import Any, Dict, List, Literal, NamedTuple, Optional, Sequence, Tuple, TypedDict, Union
InitConfigType = Dict[str, Any]
AgentConfigType = Dict[str, Any]
InstanceType = Dict[str, Any]
ProxySettings = TypedDict(
'ProxySettings', {'http': Optional[str], 'https': Optional[str], 'no': List[str]}, total=False
)
# NOTE: a bit involved, but this is basically a type checking-friendly `NamedTuple`-based version of an `Enum`.
# We don't use an actual `Enum` because for backwards compatibility we need e.g. `ServiceCheck.OK` to be
# `0` (the integer), instead of an opaque enum instance.
ServiceCheckStatus = Literal[0, 1, 2, 3] # Can serve as an int enum type for type checking purposes.
_ServiceCheckType = NamedTuple(
'_ServiceCheckType',
[
('OK', ServiceCheckStatus),
('WARNING', ServiceCheckStatus),
('CRITICAL', ServiceCheckStatus),
('UNKNOWN', ServiceCheckStatus),
],
)
ServiceCheck = _ServiceCheckType(0, 1, 2, 3) # For public enum-style use: `ServiceCheck.OK`, ...
ExternalTagType = Tuple[str, Dict[str, List[str]]]
Event = TypedDict(
'Event',
{
'timestamp': int,
'event_type': str,
'api_key': str,
'msg_title': str,
'msg_text': str,
'aggregation_key': str,
'alert_type': Literal['error', 'warning', 'success', 'info'],
'source_type_name': str,
'host': str,
'tags': Sequence[Union[str, bytes]],
'priority': Literal['normal', 'low'],
},
)
|
{
"content_hash": "53da87a0cc66e0371df3325f505313ef",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 111,
"avg_line_length": 35.06976744186046,
"alnum_prop": 0.6352785145888594,
"repo_name": "DataDog/integrations-core",
"id": "ed97bc44c204f52d61c08da0f6ab463e401a3362",
"size": "1623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datadog_checks_base/datadog_checks/base/types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
}
|
"""Configuration for editorial application."""
from django.apps import AppConfig
from search import register_watson
class EditorialAppConfig(AppConfig):
"""Configure editorial app."""
name = "editorial"
def ready(self):
"""Add models to search. Add models to Activity Stream."""
for model_name in [
"Project",
"Story",
"Facet",
"Task",
"Event",
"Call",
"Pitch",
"Assignment",
"ContractorProfile",
"OrganizationContractorAffiliation",
"FacetTemplate",
"ImageAsset",
"DocumentAsset",
"AudioAsset",
"VideoAsset",
"SimpleImage",
"SimpleDocument",
"SimpleAudio",
"SimpleVideo",
"Note",
]:
register_watson(self, model_name)
# TODO register keywords fields for facet and all kinds of assets
# image_asset = self.get_model("ImageAsset")
# register_watson(image_asset, "keywords")
# register models for activity
from actstream import registry
registry.register(self.get_model('User'))
registry.register(self.get_model('ContractorProfile'))
registry.register(self.get_model('OrganizationContractorAffiliation'))
registry.register(self.get_model('Call'))
registry.register(self.get_model('Pitch'))
registry.register(self.get_model('Assignment'))
registry.register(self.get_model('Organization'))
registry.register(self.get_model('Network'))
registry.register(self.get_model('Project'))
registry.register(self.get_model('Story'))
registry.register(self.get_model('Facet'))
registry.register(self.get_model('FacetTemplate'))
registry.register(self.get_model('Task'))
registry.register(self.get_model('Event'))
registry.register(self.get_model('ImageAsset'))
registry.register(self.get_model('DocumentAsset'))
registry.register(self.get_model('AudioAsset'))
registry.register(self.get_model('VideoAsset'))
registry.register(self.get_model('SimpleImage'))
registry.register(self.get_model('SimpleDocument'))
registry.register(self.get_model('SimpleAudio'))
registry.register(self.get_model('SimpleVideo'))
registry.register(self.get_model('Note'))
registry.register(self.get_model('Comment'))
|
{
"content_hash": "a8e96ce71756ff993f1e3a3f043e3322",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 37,
"alnum_prop": 0.5911196911196911,
"repo_name": "ProjectFacet/facet",
"id": "3e7d0639bc32736810f0025b73107161b5f93b4a",
"size": "2590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/editorial/apps.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4350483"
},
{
"name": "HTML",
"bytes": "1677386"
},
{
"name": "JavaScript",
"bytes": "1120019"
},
{
"name": "Python",
"bytes": "804022"
},
{
"name": "Ruby",
"bytes": "225"
},
{
"name": "Shell",
"bytes": "889"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
from django.db import IntegrityError, transaction
from django.db.models.signals import post_save
from sentry.models import Release, TagValue
from sentry.tasks.clear_expired_resolutions import clear_expired_resolutions
def ensure_release_exists(instance, created, **kwargs):
if instance.key != 'sentry:release':
return
if instance.data and instance.data.get('release_id'):
return
try:
with transaction.atomic():
release = Release.objects.create(
project=instance.project,
organization_id=instance.project.organization_id,
version=instance.value,
date_added=instance.first_seen,
)
release.add_project(instance.project)
except IntegrityError:
pass
else:
instance.update(data={'release_id': release.id})
def resolve_group_resolutions(instance, created, **kwargs):
if not created:
return
clear_expired_resolutions.delay(release_id=instance.id)
post_save.connect(
resolve_group_resolutions,
sender=Release,
dispatch_uid="resolve_group_resolutions",
weak=False
)
post_save.connect(
ensure_release_exists,
sender=TagValue,
dispatch_uid="ensure_release_exists",
weak=False
)
|
{
"content_hash": "7fa6dce358fe14d2add0830ee93e0463",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 76,
"avg_line_length": 25.807692307692307,
"alnum_prop": 0.6721311475409836,
"repo_name": "zenefits/sentry",
"id": "21112c080ead20f742b32f2cd1e503833eefb079",
"size": "1342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/receivers/releases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "249557"
},
{
"name": "HTML",
"bytes": "293019"
},
{
"name": "JavaScript",
"bytes": "975797"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5959"
},
{
"name": "Python",
"bytes": "12550461"
},
{
"name": "Ruby",
"bytes": "4026"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
import json
import sys
from typing import Any, Dict, List, Tuple, Union
from urllib.parse import unquote
from moto.core.utils import path_url
from moto.utilities.aws_headers import amz_crc32, amzn_request_id
from moto.core.responses import BaseResponse, TYPE_RESPONSE
from .models import lambda_backends, LambdaBackend
class LambdaResponse(BaseResponse):
def __init__(self) -> None:
super().__init__(service_name="awslambda")
@property
def json_body(self) -> Dict[str, Any]: # type: ignore[misc]
return json.loads(self.body)
@property
def backend(self) -> LambdaBackend:
return lambda_backends[self.current_account][self.region]
def root(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
self.setup_class(request, full_url, headers)
if request.method == "GET":
return self._list_functions()
elif request.method == "POST":
return self._create_function()
else:
raise ValueError("Cannot handle request")
def event_source_mappings(
self, request: Any, full_url: str, headers: Any
) -> TYPE_RESPONSE:
self.setup_class(request, full_url, headers)
if request.method == "GET":
querystring = self.querystring
event_source_arn = querystring.get("EventSourceArn", [None])[0]
function_name = querystring.get("FunctionName", [None])[0]
return self._list_event_source_mappings(event_source_arn, function_name)
elif request.method == "POST":
return self._create_event_source_mapping()
else:
raise ValueError("Cannot handle request")
def aliases(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE: # type: ignore[return]
self.setup_class(request, full_url, headers)
if request.method == "POST":
return self._create_alias()
def alias(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE: # type: ignore[return]
self.setup_class(request, full_url, headers)
if request.method == "DELETE":
return self._delete_alias()
elif request.method == "GET":
return self._get_alias()
elif request.method == "PUT":
return self._update_alias()
def event_source_mapping(
self, request: Any, full_url: str, headers: Any
) -> TYPE_RESPONSE:
self.setup_class(request, full_url, headers)
path = request.path if hasattr(request, "path") else path_url(request.url)
uuid = path.split("/")[-1]
if request.method == "GET":
return self._get_event_source_mapping(uuid)
elif request.method == "PUT":
return self._update_event_source_mapping(uuid)
elif request.method == "DELETE":
return self._delete_event_source_mapping(uuid)
else:
raise ValueError("Cannot handle request")
def list_layers(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE: # type: ignore[return]
self.setup_class(request, full_url, headers)
if request.method == "GET":
return self._list_layers()
def layers_version(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE: # type: ignore[return]
self.setup_class(request, full_url, headers)
if request.method == "DELETE":
return self._delete_layer_version()
elif request.method == "GET":
return self._get_layer_version()
def layers_versions(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE: # type: ignore[return]
self.setup_class(request, full_url, headers)
if request.method == "GET":
return self._get_layer_versions()
if request.method == "POST":
return self._publish_layer_version()
def function(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
self.setup_class(request, full_url, headers)
if request.method == "GET":
return self._get_function()
elif request.method == "DELETE":
return self._delete_function()
else:
raise ValueError("Cannot handle request")
def versions(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
self.setup_class(request, full_url, headers)
if request.method == "GET":
# This is ListVersionByFunction
path = request.path if hasattr(request, "path") else path_url(request.url)
function_name = path.split("/")[-2]
return self._list_versions_by_function(function_name)
elif request.method == "POST":
return self._publish_function()
else:
raise ValueError("Cannot handle request")
@amz_crc32
@amzn_request_id
def invoke(self, request: Any, full_url: str, headers: Any) -> Tuple[int, Dict[str, str], Union[str, bytes]]: # type: ignore[misc]
self.setup_class(request, full_url, headers)
if request.method == "POST":
return self._invoke(request)
else:
raise ValueError("Cannot handle request")
@amz_crc32
@amzn_request_id
def invoke_async(self, request: Any, full_url: str, headers: Any) -> Tuple[int, Dict[str, str], Union[str, bytes]]: # type: ignore[misc]
self.setup_class(request, full_url, headers)
if request.method == "POST":
return self._invoke_async()
else:
raise ValueError("Cannot handle request")
def tag(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
self.setup_class(request, full_url, headers)
if request.method == "GET":
return self._list_tags()
elif request.method == "POST":
return self._tag_resource()
elif request.method == "DELETE":
return self._untag_resource()
else:
raise ValueError(f"Cannot handle {request.method} request")
def policy(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
self.setup_class(request, full_url, headers)
if request.method == "GET":
return self._get_policy(request)
elif request.method == "POST":
return self._add_policy(request)
elif request.method == "DELETE":
return self._del_policy(request, self.querystring)
else:
raise ValueError(f"Cannot handle {request.method} request")
def configuration(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
self.setup_class(request, full_url, headers)
if request.method == "PUT":
return self._put_configuration()
if request.method == "GET":
return self._get_function_configuration()
else:
raise ValueError("Cannot handle request")
def code(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
self.setup_class(request, full_url, headers)
if request.method == "PUT":
return self._put_code()
else:
raise ValueError("Cannot handle request")
def code_signing_config(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE: # type: ignore[return]
self.setup_class(request, full_url, headers)
if request.method == "GET":
return self._get_code_signing_config()
def function_concurrency(
self, request: Any, full_url: str, headers: Any
) -> TYPE_RESPONSE:
http_method = request.method
self.setup_class(request, full_url, headers)
if http_method == "GET":
return self._get_function_concurrency()
elif http_method == "DELETE":
return self._delete_function_concurrency()
elif http_method == "PUT":
return self._put_function_concurrency()
else:
raise ValueError("Cannot handle request")
def function_url_config(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE: # type: ignore[return]
http_method = request.method
self.setup_class(request, full_url, headers)
if http_method == "DELETE":
return self._delete_function_url_config()
elif http_method == "GET":
return self._get_function_url_config()
elif http_method == "POST":
return self._create_function_url_config()
elif http_method == "PUT":
return self._update_function_url_config()
def _add_policy(self, request: Any) -> TYPE_RESPONSE:
path = request.path if hasattr(request, "path") else path_url(request.url)
function_name = unquote(path.split("/")[-2])
qualifier = self.querystring.get("Qualifier", [None])[0]
statement = self.body
statement = self.backend.add_permission(function_name, qualifier, statement)
return 200, {}, json.dumps({"Statement": json.dumps(statement)})
def _get_policy(self, request: Any) -> TYPE_RESPONSE:
path = request.path if hasattr(request, "path") else path_url(request.url)
function_name = unquote(path.split("/")[-2])
out = self.backend.get_policy(function_name)
return 200, {}, out
def _del_policy(self, request: Any, querystring: Dict[str, Any]) -> TYPE_RESPONSE:
path = request.path if hasattr(request, "path") else path_url(request.url)
function_name = unquote(path.split("/")[-3])
statement_id = path.split("/")[-1].split("?")[0]
revision = querystring.get("RevisionId", "")
if self.backend.get_function(function_name):
self.backend.remove_permission(function_name, statement_id, revision)
return 204, {}, "{}"
else:
return 404, {}, "{}"
def _invoke(self, request: Any) -> Tuple[int, Dict[str, str], Union[str, bytes]]:
response_headers: Dict[str, str] = {}
# URL Decode in case it's a ARN:
function_name = unquote(self.path.rsplit("/", 2)[-2])
qualifier = self._get_param("qualifier")
payload = self.backend.invoke(
function_name, qualifier, self.body, self.headers, response_headers
)
if payload:
if request.headers.get("X-Amz-Invocation-Type") != "Event":
if sys.getsizeof(payload) > 6000000:
response_headers["Content-Length"] = "142"
response_headers["x-amz-function-error"] = "Unhandled"
error_dict = {
"errorMessage": "Response payload size exceeded maximum allowed payload size (6291556 bytes).",
"errorType": "Function.ResponseSizeTooLarge",
}
payload = json.dumps(error_dict).encode("utf-8")
response_headers["content-type"] = "application/json"
if request.headers.get("X-Amz-Invocation-Type") == "Event":
status_code = 202
elif request.headers.get("X-Amz-Invocation-Type") == "DryRun":
status_code = 204
else:
if request.headers.get("X-Amz-Log-Type") != "Tail":
del response_headers["x-amz-log-result"]
status_code = 200
return status_code, response_headers, payload
else:
return 404, response_headers, "{}"
def _invoke_async(self) -> Tuple[int, Dict[str, str], Union[str, bytes]]:
response_headers: Dict[str, Any] = {}
function_name = unquote(self.path.rsplit("/", 3)[-3])
fn = self.backend.get_function(function_name, None)
payload = fn.invoke(self.body, self.headers, response_headers)
response_headers["Content-Length"] = str(len(payload))
return 202, response_headers, payload
def _list_functions(self) -> TYPE_RESPONSE:
querystring = self.querystring
func_version = querystring.get("FunctionVersion", [None])[0]
result: Dict[str, List[Dict[str, Any]]] = {"Functions": []}
for fn in self.backend.list_functions(func_version):
json_data = fn.get_configuration()
result["Functions"].append(json_data)
return 200, {}, json.dumps(result)
def _list_versions_by_function(self, function_name: str) -> TYPE_RESPONSE:
result: Dict[str, Any] = {"Versions": []}
functions = self.backend.list_versions_by_function(function_name)
for fn in functions:
json_data = fn.get_configuration()
result["Versions"].append(json_data)
return 200, {}, json.dumps(result)
def _create_function(self) -> TYPE_RESPONSE:
fn = self.backend.create_function(self.json_body)
config = fn.get_configuration(on_create=True)
return 201, {}, json.dumps(config)
def _create_function_url_config(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.split("/")[-2])
config = self.backend.create_function_url_config(function_name, self.json_body)
return 201, {}, json.dumps(config.to_dict())
def _delete_function_url_config(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.split("/")[-2])
self.backend.delete_function_url_config(function_name)
return 204, {}, "{}"
def _get_function_url_config(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.split("/")[-2])
config = self.backend.get_function_url_config(function_name)
return 201, {}, json.dumps(config.to_dict())
def _update_function_url_config(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.split("/")[-2])
config = self.backend.update_function_url_config(function_name, self.json_body)
return 200, {}, json.dumps(config.to_dict())
def _create_event_source_mapping(self) -> TYPE_RESPONSE:
fn = self.backend.create_event_source_mapping(self.json_body)
config = fn.get_configuration()
return 201, {}, json.dumps(config)
def _list_event_source_mappings(
self, event_source_arn: str, function_name: str
) -> TYPE_RESPONSE:
esms = self.backend.list_event_source_mappings(event_source_arn, function_name)
result = {"EventSourceMappings": [esm.get_configuration() for esm in esms]}
return 200, {}, json.dumps(result)
def _get_event_source_mapping(self, uuid: str) -> TYPE_RESPONSE:
result = self.backend.get_event_source_mapping(uuid)
if result:
return 200, {}, json.dumps(result.get_configuration())
else:
return 404, {}, "{}"
def _update_event_source_mapping(self, uuid: str) -> TYPE_RESPONSE:
result = self.backend.update_event_source_mapping(uuid, self.json_body)
if result:
return 202, {}, json.dumps(result.get_configuration())
else:
return 404, {}, "{}"
def _delete_event_source_mapping(self, uuid: str) -> TYPE_RESPONSE:
esm = self.backend.delete_event_source_mapping(uuid)
if esm:
json_result = esm.get_configuration()
json_result.update({"State": "Deleting"})
return 202, {}, json.dumps(json_result)
else:
return 404, {}, "{}"
def _publish_function(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.split("/")[-2])
description = self._get_param("Description")
fn = self.backend.publish_function(function_name, description)
config = fn.get_configuration() # type: ignore[union-attr]
return 201, {}, json.dumps(config)
def _delete_function(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/", 1)[-1])
qualifier = self._get_param("Qualifier", None)
self.backend.delete_function(function_name, qualifier)
return 204, {}, ""
@staticmethod
def _set_configuration_qualifier(configuration: Dict[str, Any], qualifier: str) -> Dict[str, Any]: # type: ignore[misc]
if qualifier is None or qualifier == "$LATEST":
configuration["Version"] = "$LATEST"
if qualifier == "$LATEST":
configuration["FunctionArn"] += ":$LATEST"
return configuration
def _get_function(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/", 1)[-1])
qualifier = self._get_param("Qualifier", None)
fn = self.backend.get_function(function_name, qualifier)
code = fn.get_code()
code["Configuration"] = self._set_configuration_qualifier(
code["Configuration"], qualifier
)
return 200, {}, json.dumps(code)
def _get_function_configuration(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/", 2)[-2])
qualifier = self._get_param("Qualifier", None)
fn = self.backend.get_function(function_name, qualifier)
configuration = self._set_configuration_qualifier(
fn.get_configuration(), qualifier
)
return 200, {}, json.dumps(configuration)
def _get_aws_region(self, full_url: str) -> str:
region = self.region_regex.search(full_url)
if region:
return region.group(1)
else:
return self.default_region
def _list_tags(self) -> TYPE_RESPONSE:
function_arn = unquote(self.path.rsplit("/", 1)[-1])
tags = self.backend.list_tags(function_arn)
return 200, {}, json.dumps({"Tags": tags})
def _tag_resource(self) -> TYPE_RESPONSE:
function_arn = unquote(self.path.rsplit("/", 1)[-1])
self.backend.tag_resource(function_arn, self.json_body["Tags"])
return 200, {}, "{}"
def _untag_resource(self) -> TYPE_RESPONSE:
function_arn = unquote(self.path.rsplit("/", 1)[-1])
tag_keys = self.querystring["tagKeys"]
self.backend.untag_resource(function_arn, tag_keys)
return 204, {}, "{}"
def _put_configuration(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/", 2)[-2])
qualifier = self._get_param("Qualifier", None)
resp = self.backend.update_function_configuration(
function_name, qualifier, body=self.json_body
)
if resp:
return 200, {}, json.dumps(resp)
else:
return 404, {}, "{}"
def _put_code(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/", 2)[-2])
qualifier = self._get_param("Qualifier", None)
resp = self.backend.update_function_code(
function_name, qualifier, body=self.json_body
)
if resp:
return 200, {}, json.dumps(resp)
else:
return 404, {}, "{}"
def _get_code_signing_config(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/", 2)[-2])
resp = self.backend.get_code_signing_config(function_name)
return 200, {}, json.dumps(resp)
def _get_function_concurrency(self) -> TYPE_RESPONSE:
path_function_name = unquote(self.path.rsplit("/", 2)[-2])
function_name = self.backend.get_function(path_function_name)
if function_name is None:
return 404, {}, "{}"
resp = self.backend.get_function_concurrency(path_function_name)
return 200, {}, json.dumps({"ReservedConcurrentExecutions": resp})
def _delete_function_concurrency(self) -> TYPE_RESPONSE:
path_function_name = unquote(self.path.rsplit("/", 2)[-2])
function_name = self.backend.get_function(path_function_name)
if function_name is None:
return 404, {}, "{}"
self.backend.delete_function_concurrency(path_function_name)
return 204, {}, "{}"
def _put_function_concurrency(self) -> TYPE_RESPONSE:
path_function_name = unquote(self.path.rsplit("/", 2)[-2])
function = self.backend.get_function(path_function_name)
if function is None:
return 404, {}, "{}"
concurrency = self._get_param("ReservedConcurrentExecutions", None)
resp = self.backend.put_function_concurrency(path_function_name, concurrency)
return 200, {}, json.dumps({"ReservedConcurrentExecutions": resp})
def _list_layers(self) -> TYPE_RESPONSE:
layers = self.backend.list_layers()
return 200, {}, json.dumps({"Layers": layers})
def _delete_layer_version(self) -> TYPE_RESPONSE:
layer_name = self.path.split("/")[-3]
layer_version = self.path.split("/")[-1]
self.backend.delete_layer_version(layer_name, layer_version)
return 200, {}, "{}"
def _get_layer_version(self) -> TYPE_RESPONSE:
layer_name = self.path.split("/")[-3]
layer_version = self.path.split("/")[-1]
layer = self.backend.get_layer_version(layer_name, layer_version)
return 200, {}, json.dumps(layer.get_layer_version())
def _get_layer_versions(self) -> TYPE_RESPONSE:
layer_name = self.path.rsplit("/", 2)[-2]
layer_versions = self.backend.get_layer_versions(layer_name)
return (
200,
{},
json.dumps(
{"LayerVersions": [lv.get_layer_version() for lv in layer_versions]}
),
)
def _publish_layer_version(self) -> TYPE_RESPONSE:
spec = self.json_body
if "LayerName" not in spec:
spec["LayerName"] = self.path.rsplit("/", 2)[-2]
layer_version = self.backend.publish_layer_version(spec)
config = layer_version.get_layer_version()
return 201, {}, json.dumps(config)
def _create_alias(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/", 2)[-2])
params = json.loads(self.body)
alias_name = params.get("Name")
description = params.get("Description", "")
function_version = params.get("FunctionVersion")
routing_config = params.get("RoutingConfig")
alias = self.backend.create_alias(
name=alias_name,
function_name=function_name,
function_version=function_version,
description=description,
routing_config=routing_config,
)
return 201, {}, json.dumps(alias.to_json())
def _delete_alias(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/")[-3])
alias_name = unquote(self.path.rsplit("/", 2)[-1])
self.backend.delete_alias(name=alias_name, function_name=function_name)
return 201, {}, "{}"
def _get_alias(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/")[-3])
alias_name = unquote(self.path.rsplit("/", 2)[-1])
alias = self.backend.get_alias(name=alias_name, function_name=function_name)
return 201, {}, json.dumps(alias.to_json())
def _update_alias(self) -> TYPE_RESPONSE:
function_name = unquote(self.path.rsplit("/")[-3])
alias_name = unquote(self.path.rsplit("/", 2)[-1])
params = json.loads(self.body)
description = params.get("Description")
function_version = params.get("FunctionVersion")
routing_config = params.get("RoutingConfig")
alias = self.backend.update_alias(
name=alias_name,
function_name=function_name,
function_version=function_version,
description=description,
routing_config=routing_config,
)
return 201, {}, json.dumps(alias.to_json())
|
{
"content_hash": "d8aa31949b95104993ce6d222704d2c8",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 141,
"avg_line_length": 41.19298245614035,
"alnum_prop": 0.601022146507666,
"repo_name": "spulec/moto",
"id": "31b7298a7cb7db31b42ec4128fb71fccf959579a",
"size": "23480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/awslambda/responses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "5983"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "1424"
},
{
"name": "Jinja",
"bytes": "2502"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "14737868"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "5515"
}
],
"symlink_target": ""
}
|
import uppsell
from . import resources
uppsell.api.add_resource(resources.CustomerResource,
r'customers$',
r'customers/(?P<id>\d*)$')
uppsell.api.add_resource(resources.CustomerAddressResource,
r'customers/(?P<customer__id>\d*)/addresses$',
r'customers/(?P<customer__id>\d*)/addresses/(?P<id>\d*)$')
uppsell.api.add_resource(resources.CardResource,
r'customers/(?P<customer__id>\d*)/cards$',
r'customers/(?P<customer__id>\d*)/cards/(?P<id>\d*)$')
uppsell.api.add_resource(resources.ProfileResource,
r'customers/(?P<customer__id>\d*)/profile$')
uppsell.api.add_resource(resources.ProductResource,
r'^products$',
r'^products/(?P<sku>[^/]*)')
uppsell.api.add_resource(resources.StoreResource,
r'stores$',
r'stores/(?P<code>[^/]*)$')
uppsell.api.add_resource(resources.ListingResource,
r'stores/(?P<store_code>[^/]*)/products$',
r'stores/(?P<store_code>[^/]*)/products/(?P<sku>[^/]*)$')
uppsell.api.add_resource(resources.CartResource,
r'stores/(?P<store_code>[^/]*)/carts$',
r'stores/(?P<store_code>[^/]*)/carts/(?P<key>[^/]*)$')
uppsell.api.add_resource(resources.CartItemResource,
r'stores/(?P<store_code>[^/]*)/carts/(?P<key>[^/]*)/items$',
r'stores/(?P<store_code>[^/]*)/carts/(?P<key>[^/]*)/items/(?P<sku>[^/]*)$')
uppsell.api.add_resource(resources.OrderResource,
r'orders$',
r'orders/(?P<id>[^/]*)$')
uppsell.api.add_resource(resources.OrderItemResource,
r'orders/(?P<id>[^/]*)/items$',
r'orders/(?P<id>[^/]*)/items/(?P<sku>[^/]*)$')
uppsell.api.add_resource(resources.OrderEventResource,
r'orders/(?P<order>[^/]*)/events$')
uppsell.api.add_resource(resources.CouponResource,
r'coupons/(?P<code>[^/]*)$')
|
{
"content_hash": "d3f610c4aa8407eb480d780ee3472d18",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 37.955555555555556,
"alnum_prop": 0.6381733021077284,
"repo_name": "upptalk/uppsell",
"id": "5798c74b8b5ad00d0f045918b9fcf99523e42662",
"size": "1755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uppsell/uppsell_api/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "892864"
}
],
"symlink_target": ""
}
|
__author__ = 'Alex Dzul'
from pyql.geo.generics import GenericGeoPlace
from pyql.interface import YQLConector
__all__ = ('State', )
YQL_TABLE = "geo.states"
class State(GenericGeoPlace):
@staticmethod
def get(**kwargs):
"""
Realiza una consulta a la base de datos de Yahoo utilizando YQL.
El valor retornado un solo elemento. Si la consulta retorna más entonces
se presentará un error de múltiples resultados encontrados.
"""
connect = YQLConector()
query = connect.make_query(YQL_TABLE, **kwargs)
response = connect.request(query)
return State.generic_get(response)
@staticmethod
def filter(**kwargs):
"""
Realiza una consulta a la base de datos de Yahoo utilizando YQL.
El valor retornado siempre será una lista de objetos tipo "Continents"
"""
connect = YQLConector()
query = connect.make_query(YQL_TABLE, **kwargs)
response = connect.request(query)
return State.generic_filter(response)
|
{
"content_hash": "a85dab4827e1ae0074f48699e442e277",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 80,
"avg_line_length": 30.852941176470587,
"alnum_prop": 0.6510962821734986,
"repo_name": "alexdzul/pyql-weather",
"id": "2356ff050bba8cec12f72dd68b137514af9a33da",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyql/geo/states.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67096"
}
],
"symlink_target": ""
}
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Find-DomainShare',
'Author': ['@harmj0y'],
'Description': ('Finds shares on machines in the domain. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Hosts to enumerate.',
'Required' : False,
'Value' : ''
},
'ComputerLDAPFilter' : {
'Description' : 'Host filter name to query AD for, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'ComputerSearchBase' : {
'Description' : 'Specifies the LDAP source to search through for computers',
'Required' : False,
'Value' : ''
},
'ComputerOperatingSystem' : {
'Description' : 'Return computers with a specific operating system, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'ComputerServicePack' : {
'Description' : 'Return computers with the specified service pack, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'ComputerSiteName' : {
'Description' : 'Return computers in the specific AD Site name, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'CheckShareAccess' : {
'Description' : 'Switch. Only display found shares that the local user has access to.',
'Required' : False,
'Value' : ''
},
'Server' : {
'Description' : 'Specifies an active directory server (domain controller) to bind to',
'Required' : False,
'Value' : ''
},
'SearchScope' : {
'Description' : 'Specifies the scope to search under, Base/OneLevel/Subtree (default of Subtree)',
'Required' : False,
'Value' : ''
},
'ResultPageSize' : {
'Description' : 'Specifies the PageSize to set for the LDAP searcher object.',
'Required' : False,
'Value' : ''
},
'ServerTimeLimit' : {
'Description' : 'Specifies the maximum amount of time the server spends searching. Default of 120 seconds.',
'Required' : False,
'Value' : ''
},
'Tombstone' : {
'Description' : 'Switch. Specifies that the search should also return deleted/tombstoned objects.',
'Required' : False,
'Value' : ''
},
'Delay' : {
'Description' : 'Delay between enumerating hosts, defaults to 0.',
'Required' : False,
'Value' : ''
},
'Jitter' : {
'Description' : 'Specifies the jitter (0-1.0) to apply to any specified -Delay, defaults to +/- 0.3.',
'Required' : False,
'Value' : ''
},
'Threads' : {
'Description' : 'The maximum concurrent threads to execute.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.strip_powershell_comments(moduleCode)
script += "\n" + moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
|
{
"content_hash": "0919c675bedf0ba3b699b1330fb3eba4",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 128,
"avg_line_length": 39.130434782608695,
"alnum_prop": 0.45365079365079364,
"repo_name": "PowerShellEmpire/Empire",
"id": "b7eb7430bb04ecd40ca90099bbefd0f114040073",
"size": "6300",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/situational_awareness/network/powerview/share_finder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "2563"
},
{
"name": "PowerShell",
"bytes": "11378828"
},
{
"name": "Python",
"bytes": "1207008"
},
{
"name": "Shell",
"bytes": "2166"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from difflib import SequenceMatcher
from consts.account_permissions import AccountPermissions
from consts.event_type import EventType
from controllers.suggestions.suggestions_review_base_controller import \
SuggestionsReviewBaseController
from database.event_query import EventListQuery
from helpers.event_manipulator import EventManipulator
from helpers.outgoing_notification_helper import OutgoingNotificationHelper
from models.event import Event
from models.suggestion import Suggestion
from template_engine import jinja2_engine
class SuggestOffseasonEventReviewController(SuggestionsReviewBaseController):
REQUIRED_PERMISSIONS = [AccountPermissions.REVIEW_OFFSEASON_EVENTS]
def __init__(self, *args, **kw):
super(SuggestOffseasonEventReviewController, self).__init__(*args, **kw)
def create_target_model(self, suggestion):
event_id = self.request.get("event_short", None)
event_key = str(self.request.get("year")) + str.lower(str(self.request.get("event_short")))
if not event_id:
# Need to supply a key :(
return 'missing_key', None
if not Event.validate_key_name(event_key):
# Bad event key generated
return 'bad_key', None
start_date = None
if self.request.get("start_date"):
start_date = datetime.strptime(self.request.get("start_date"), "%Y-%m-%d")
end_date = None
if self.request.get("end_date"):
end_date = datetime.strptime(self.request.get("end_date"), "%Y-%m-%d")
existing_event = Event.get_by_id(event_key)
if existing_event:
return 'duplicate_key', None
first_code = self.request.get("first_code", '')
event = Event(
id=event_key,
end_date=end_date,
event_short=self.request.get("event_short"),
event_type_enum=EventType.OFFSEASON,
district_key=None,
venue=self.request.get("venue"),
venue_address=self.request.get("venue_address"),
city=self.request.get("city"),
state_prov=self.request.get("state"),
country=self.request.get("country"),
name=self.request.get("name"),
short_name=self.request.get("short_name"),
start_date=start_date,
website=self.request.get("website"),
year=int(self.request.get("year")),
first_code=first_code,
official=(not first_code == ''),
)
EventManipulator.createOrUpdate(event)
author = suggestion.author.get()
OutgoingNotificationHelper.send_suggestion_result_email(
to=author.email,
subject="[TBA] Offseason Event Suggestion: {}".format(event.name),
email_body="""Dear {},
Thank you for suggesting an offseason event to The Blue Alliance. Your suggestion has been approved and you can find the event at https://www.thebluealliance.com/event/{}
If you are the event's organizer and would like to upload teams attending, match videos, or real-time match results to TBA before or during the event, you can do so using the TBA EventWizard - request auth keys here: https://www.thebluealliance.com/request/apiwrite
Thanks for helping make TBA better,
The Blue Alliance Admins
""".format(author.nickname, event_key)
)
return 'success', event_key
def was_create_success(self, ret):
return ret and ret[0] == 'success'
def get(self):
suggestions = Suggestion.query().filter(
Suggestion.review_state == Suggestion.REVIEW_PENDING).filter(
Suggestion.target_model == "offseason-event")
year = datetime.now().year
year_events_future = EventListQuery(year).fetch_async()
last_year_events_future = EventListQuery(year - 1).fetch_async()
events_and_ids = [self._create_candidate_event(suggestion) for suggestion in suggestions]
year_events = year_events_future.get_result()
year_offseason_events = [e for e in year_events if e.event_type_enum == EventType.OFFSEASON]
last_year_events = last_year_events_future.get_result()
last_year_offseason_events = [e for e in last_year_events if e.event_type_enum == EventType.OFFSEASON]
similar_events = [self._get_similar_events(event[1], year_offseason_events) for event in events_and_ids]
similar_last_year = [self._get_similar_events(event[1], last_year_offseason_events) for event in events_and_ids]
self.template_values.update({
'success': self.request.get("success"),
'event_key': self.request.get("event_key"),
'events_and_ids': events_and_ids,
'similar_events': similar_events,
'similar_last_year': similar_last_year,
})
self.response.out.write(
jinja2_engine.render('suggestions/suggest_offseason_event_review_list.html', self.template_values))
def post(self):
self.verify_permissions()
id_str = self.request.get("suggestion_id")
suggestion_id = int(id_str) if id_str.isdigit() else id_str
verdict = self.request.get("verdict")
if verdict == "accept":
status, event_key = self._process_accepted(suggestion_id)
self.redirect("/suggest/offseason/review?success={}&event_key={}".format(status, event_key))
return
elif verdict == "reject":
self._process_rejected(suggestion_id)
self.redirect("/suggest/offseason/review?success=reject")
return
self.redirect("/suggest/offseason/review")
@classmethod
def _create_candidate_event(cls, suggestion):
start_date = None
end_date = None
try:
start_date = datetime.strptime(suggestion.contents['start_date'], "%Y-%m-%d")
end_date = datetime.strptime(suggestion.contents['end_date'], "%Y-%m-%d")
except ValueError:
pass
venue = suggestion.contents['venue_name']
address = suggestion.contents['address']
city = suggestion.contents['city']
state = suggestion.contents['state']
country = suggestion.contents['country']
address = u"{}\n{}\n{}, {}, {}".format(venue, address, city, state, country)
return suggestion.key.id(), Event(
end_date=end_date,
event_type_enum=EventType.OFFSEASON,
district_key=None,
venue=venue,
city=city,
state_prov=state,
country=country,
venue_address=address,
name=suggestion.contents['name'],
start_date=start_date,
website=suggestion.contents['website'],
year=start_date.year if start_date else None,
first_code=suggestion.contents.get('first_code', None),
official=False)
@classmethod
def _get_similar_events(cls, candidate_event, offseason_events):
"""
Finds events this year with a similar name
Returns a tuple of (event key, event name)
"""
similar_events = []
for event in offseason_events:
similarity = SequenceMatcher(a=candidate_event.name, b=event.name).ratio()
if similarity > 0.5:
# Somewhat arbitrary cutoff
similar_events.append((event.key_name, event.name))
return similar_events
|
{
"content_hash": "25554d4e635359f3cb7c3a3046774623",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 265,
"avg_line_length": 42.9367816091954,
"alnum_prop": 0.6328470084326061,
"repo_name": "phil-lopreiato/the-blue-alliance",
"id": "8cc7ad923f9a560898af27f84b03e49f5f676f46",
"size": "7471",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "controllers/suggestions/suggest_offseason_event_review_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "342115"
},
{
"name": "Dockerfile",
"bytes": "1806"
},
{
"name": "HTML",
"bytes": "923112"
},
{
"name": "JavaScript",
"bytes": "519596"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "2829552"
},
{
"name": "Ruby",
"bytes": "3494"
},
{
"name": "Shell",
"bytes": "15899"
}
],
"symlink_target": ""
}
|
import copy
import operator
import warnings
import weakref
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from .column import (ASCIITNULL, FITS2NUMPY, ASCII2NUMPY, ASCII2STR, ColDefs,
_AsciiColDefs, _FormatX, _FormatP, _VLF, _get_index,
_wrapx, _unwrapx, _makep, Delayed)
from .util import decode_ascii, encode_ascii, _rstrip_inplace
from ...utils import lazyproperty
class FITS_record:
"""
FITS record class.
`FITS_record` is used to access records of the `FITS_rec` object.
This will allow us to deal with scaled columns. It also handles
conversion/scaling of columns in ASCII tables. The `FITS_record`
class expects a `FITS_rec` object as input.
"""
def __init__(self, input, row=0, start=None, end=None, step=None,
base=None, **kwargs):
"""
Parameters
----------
input : array
The array to wrap.
row : int, optional
The starting logical row of the array.
start : int, optional
The starting column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
end : int, optional
The ending column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
"""
self.array = input
self.row = row
if base:
width = len(base)
else:
width = self.array._nfields
s = slice(start, end, step).indices(width)
self.start, self.end, self.step = s
self.base = base
def __getitem__(self, key):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError("Key '{}' does not exist.".format(key))
elif isinstance(key, slice):
return type(self)(self.array, self.row, key.start, key.stop,
key.step, self)
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError('Index out of bounds')
return self.array.field(indx)[self.row]
def __setitem__(self, key, value):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError("Key '{}' does not exist.".format(key))
elif isinstance(key, slice):
for indx in range(slice.start, slice.stop, slice.step):
indx = self._get_indx(indx)
self.array.field(indx)[self.row] = value
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError('Index out of bounds')
self.array.field(indx)[self.row] = value
def __len__(self):
return len(range(self.start, self.end, self.step))
def __repr__(self):
"""
Display a single row.
"""
outlist = []
for idx in range(len(self)):
outlist.append(repr(self[idx]))
return '({})'.format(', '.join(outlist))
def field(self, field):
"""
Get the field data of the record.
"""
return self.__getitem__(field)
def setfield(self, field, value):
"""
Set the field data of the record.
"""
self.__setitem__(field, value)
@lazyproperty
def _bases(self):
bases = [weakref.proxy(self)]
base = self.base
while base:
bases.append(base)
base = base.base
return bases
def _get_index(self, indx):
indices = np.ogrid[:self.array._nfields]
for base in reversed(self._bases):
if base.step < 1:
s = slice(base.start, None, base.step)
else:
s = slice(base.start, base.end, base.step)
indices = indices[s]
return indices[indx]
class FITS_rec(np.recarray):
"""
FITS record array class.
`FITS_rec` is the data part of a table HDU's data part. This is a layer
over the `~numpy.recarray`, so we can deal with scaled columns.
It inherits all of the standard methods from `numpy.ndarray`.
"""
_record_type = FITS_record
_character_as_bytes = False
def __new__(subtype, input):
"""
Construct a FITS record array from a recarray.
"""
# input should be a record array
if input.dtype.subdtype is None:
self = np.recarray.__new__(subtype, input.shape, input.dtype,
buf=input.data)
else:
self = np.recarray.__new__(subtype, input.shape, input.dtype,
buf=input.data, strides=input.strides)
self._init()
if self.dtype.fields:
self._nfields = len(self.dtype.fields)
return self
def __setstate__(self, state):
meta = state[-1]
column_state = state[-2]
state = state[:-2]
super().__setstate__(state)
self._col_weakrefs = weakref.WeakSet()
for attr, value in zip(meta, column_state):
setattr(self, attr, value)
def __reduce__(self):
"""
Return a 3-tuple for pickling a FITS_rec. Use the super-class
functionality but then add in a tuple of FITS_rec-specific
values that get used in __setstate__.
"""
reconst_func, reconst_func_args, state = super().__reduce__()
# Define FITS_rec-specific attrs that get added to state
column_state = []
meta = []
for attrs in ['_converted', '_heapoffset', '_heapsize', '_nfields',
'_gap', '_uint', 'parnames', '_coldefs']:
with suppress(AttributeError):
# _coldefs can be Delayed, and file objects cannot be
# picked, it needs to be deepcopied first
if attrs == '_coldefs':
column_state.append(self._coldefs.__deepcopy__(None))
else:
column_state.append(getattr(self, attrs))
meta.append(attrs)
state = state + (column_state, meta)
return reconst_func, reconst_func_args, state
def __array_finalize__(self, obj):
if obj is None:
return
if isinstance(obj, FITS_rec):
self._character_as_bytes = obj._character_as_bytes
if isinstance(obj, FITS_rec) and obj.dtype == self.dtype:
self._converted = obj._converted
self._heapoffset = obj._heapoffset
self._heapsize = obj._heapsize
self._col_weakrefs = obj._col_weakrefs
self._coldefs = obj._coldefs
self._nfields = obj._nfields
self._gap = obj._gap
self._uint = obj._uint
elif self.dtype.fields is not None:
# This will allow regular ndarrays with fields, rather than
# just other FITS_rec objects
self._nfields = len(self.dtype.fields)
self._converted = {}
self._heapoffset = getattr(obj, '_heapoffset', 0)
self._heapsize = getattr(obj, '_heapsize', 0)
self._gap = getattr(obj, '_gap', 0)
self._uint = getattr(obj, '_uint', False)
self._col_weakrefs = weakref.WeakSet()
self._coldefs = ColDefs(self)
# Work around chicken-egg problem. Column.array relies on the
# _coldefs attribute to set up ref back to parent FITS_rec; however
# in the above line the self._coldefs has not been assigned yet so
# this fails. This patches that up...
for col in self._coldefs:
del col.array
col._parent_fits_rec = weakref.ref(self)
else:
self._init()
def _init(self):
"""Initializes internal attributes specific to FITS-isms."""
self._nfields = 0
self._converted = {}
self._heapoffset = 0
self._heapsize = 0
self._col_weakrefs = weakref.WeakSet()
self._coldefs = None
self._gap = 0
self._uint = False
@classmethod
def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False):
"""
Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec`
object.
.. note::
This was originally part of the ``new_table`` function in the table
module but was moved into a class method since most of its
functionality always had more to do with initializing a `FITS_rec`
object than anything else, and much of it also overlapped with
``FITS_rec._scale_back``.
Parameters
----------
columns : sequence of `Column` or a `ColDefs`
The columns from which to create the table data. If these
columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns
will be used as a template for a new table with the requested
number of rows.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, will fill all cells with zeros or blanks. If
`False`, copy the data from input, undefined cells will still
be filled with zeros/blanks.
"""
if not isinstance(columns, ColDefs):
columns = ColDefs(columns)
# read the delayed data
for column in columns:
arr = column.array
if isinstance(arr, Delayed):
if arr.hdu.data is None:
column.array = None
else:
column.array = _get_recarray_field(arr.hdu.data,
arr.field)
# Reset columns._arrays (which we may want to just do away with
# altogether
del columns._arrays
# use the largest column shape as the shape of the record
if nrows == 0:
for arr in columns._arrays:
if arr is not None:
dim = arr.shape[0]
else:
dim = 0
if dim > nrows:
nrows = dim
raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8)
raw_data.fill(ord(columns._padding_byte))
data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls)
data._character_as_bytes = character_as_bytes
# Make sure the data is a listener for changes to the columns
columns._add_listener(data)
# Previously this assignment was made from hdu.columns, but that's a
# bug since if a _TableBaseHDU has a FITS_rec in its .data attribute
# the _TableBaseHDU.columns property is actually returned from
# .data._coldefs, so this assignment was circular! Don't make that
# mistake again.
# All of this is an artifact of the fragility of the FITS_rec class,
# and that it can't just be initialized by columns...
data._coldefs = columns
# If fill is True we don't copy anything from the column arrays. We're
# just using them as a template, and returning a table filled with
# zeros/blanks
if fill:
return data
# Otherwise we have to fill the recarray with data from the input
# columns
for idx, column in enumerate(columns):
# For each column in the ColDef object, determine the number of
# rows in that column. This will be either the number of rows in
# the ndarray associated with the column, or the number of rows
# given in the call to this function, which ever is smaller. If
# the input FILL argument is true, the number of rows is set to
# zero so that no data is copied from the original input data.
arr = column.array
if arr is None:
array_size = 0
else:
array_size = len(arr)
n = min(array_size, nrows)
# TODO: At least *some* of this logic is mostly redundant with the
# _convert_foo methods in this class; see if we can eliminate some
# of that duplication.
if not n:
# The input column had an empty array, so just use the fill
# value
continue
field = _get_recarray_field(data, idx)
name = column.name
fitsformat = column.format
recformat = fitsformat.recformat
outarr = field[:n]
inarr = arr[:n]
if isinstance(recformat, _FormatX):
# Data is a bit array
if inarr.shape[-1] == recformat.repeat:
_wrapx(inarr, outarr, recformat.repeat)
continue
elif isinstance(recformat, _FormatP):
data._cache_field(name, _makep(inarr, field, recformat,
nrows=nrows))
continue
# TODO: Find a better way of determining that the column is meant
# to be FITS L formatted
elif recformat[-2:] == FITS2NUMPY['L'] and inarr.dtype == bool:
# column is boolean
# The raw data field should be filled with either 'T' or 'F'
# (not 0). Use 'F' as a default
field[:] = ord('F')
# Also save the original boolean array in data._converted so
# that it doesn't have to be re-converted
converted = np.zeros(field.shape, dtype=bool)
converted[:n] = inarr
data._cache_field(name, converted)
# TODO: Maybe this step isn't necessary at all if _scale_back
# will handle it?
inarr = np.where(inarr == np.False_, ord('F'), ord('T'))
elif (columns[idx]._physical_values and
columns[idx]._pseudo_unsigned_ints):
# Temporary hack...
bzero = column.bzero
converted = np.zeros(field.shape, dtype=inarr.dtype)
converted[:n] = inarr
data._cache_field(name, converted)
if n < nrows:
# Pre-scale rows below the input data
field[n:] = -bzero
inarr = inarr - bzero
elif isinstance(columns, _AsciiColDefs):
# Regardless whether the format is character or numeric, if the
# input array contains characters then it's already in the raw
# format for ASCII tables
if fitsformat._pseudo_logical:
# Hack to support converting from 8-bit T/F characters
# Normally the column array is a chararray of 1 character
# strings, but we need to view it as a normal ndarray of
# 8-bit ints to fill it with ASCII codes for 'T' and 'F'
outarr = field.view(np.uint8, np.ndarray)[:n]
elif arr.dtype.kind not in ('S', 'U'):
# Set up views of numeric columns with the appropriate
# numeric dtype
# Fill with the appropriate blanks for the column format
data._cache_field(name, np.zeros(nrows, dtype=arr.dtype))
outarr = data._converted[name][:n]
outarr[:] = inarr
continue
if inarr.shape != outarr.shape:
if (inarr.dtype.kind == outarr.dtype.kind and
inarr.dtype.kind in ('U', 'S') and
inarr.dtype != outarr.dtype):
inarr_rowsize = inarr[0].size
inarr = inarr.flatten().view(outarr.dtype)
# This is a special case to handle input arrays with
# non-trivial TDIMn.
# By design each row of the outarray is 1-D, while each row of
# the input array may be n-D
if outarr.ndim > 1:
# The normal case where the first dimension is the rows
inarr_rowsize = inarr[0].size
inarr = inarr.reshape(n, inarr_rowsize)
outarr[:, :inarr_rowsize] = inarr
else:
# Special case for strings where the out array only has one
# dimension (the second dimension is rolled up into the
# strings
outarr[:n] = inarr.ravel()
else:
outarr[:] = inarr
# Now replace the original column array references with the new
# fields
# This is required to prevent the issue reported in
# https://github.com/spacetelescope/PyFITS/issues/99
for idx in range(len(columns)):
columns._arrays[idx] = data.field(idx)
return data
def __repr__(self):
# Force use of the normal ndarray repr (rather than the new
# one added for recarray in Numpy 1.10) for backwards compat
return np.ndarray.__repr__(self)
def __getitem__(self, key):
if self._coldefs is None:
return super().__getitem__(key)
if isinstance(key, str):
return self.field(key)
# Have to view as a recarray then back as a FITS_rec, otherwise the
# circular reference fix/hack in FITS_rec.field() won't preserve
# the slice.
out = self.view(np.recarray)[key]
if type(out) is not np.recarray:
# Oops, we got a single element rather than a view. In that case,
# return a Record, which has no __getstate__ and is more efficient.
return self._record_type(self, key)
# We got a view; change it back to our class, and add stuff
out = out.view(type(self))
out._coldefs = ColDefs(self._coldefs)
arrays = []
out._converted = {}
for idx, name in enumerate(self._coldefs.names):
#
# Store the new arrays for the _coldefs object
#
arrays.append(self._coldefs._arrays[idx][key])
# Ensure that the sliced FITS_rec will view the same scaled
# columns as the original; this is one of the few cases where
# it is not necessary to use _cache_field()
if name in self._converted:
dummy = self._converted[name]
field = np.ndarray.__getitem__(dummy, key)
out._converted[name] = field
out._coldefs._arrays = arrays
return out
def __setitem__(self, key, value):
if self._coldefs is None:
return super().__setitem__(key, value)
if isinstance(key, str):
self[key][:] = value
return
if isinstance(key, slice):
end = min(len(self), key.stop or len(self))
end = max(0, end)
start = max(0, key.start or 0)
end = min(end, start + len(value))
for idx in range(start, end):
self.__setitem__(idx, value[idx - start])
return
if isinstance(value, FITS_record):
for idx in range(self._nfields):
self.field(self.names[idx])[key] = value.field(self.names[idx])
elif isinstance(value, (tuple, list, np.void)):
if self._nfields == len(value):
for idx in range(self._nfields):
self.field(idx)[key] = value[idx]
else:
raise ValueError('Input tuple or list required to have {} '
'elements.'.format(self._nfields))
else:
raise TypeError('Assignment requires a FITS_record, tuple, or '
'list as input.')
def _ipython_key_completions_(self):
return self.names
def copy(self, order='C'):
"""
The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to
`numpy.copy`. Differences include that it re-views the copied array as
self's ndarray subclass, as though it were taking a slice; this means
``__array_finalize__`` is called and the copy shares all the array
attributes (including ``._converted``!). So we need to make a deep
copy of all those attributes so that the two arrays truly do not share
any data.
"""
new = super().copy(order=order)
new.__dict__ = copy.deepcopy(self.__dict__)
return new
@property
def columns(self):
"""
A user-visible accessor for the coldefs.
See https://aeon.stsci.edu/ssb/trac/pyfits/ticket/44
"""
return self._coldefs
@property
def _coldefs(self):
# This used to be a normal internal attribute, but it was changed to a
# property as a quick and transparent way to work around the reference
# leak bug fixed in https://github.com/astropy/astropy/pull/4539
#
# See the long comment in the Column.array property for more details
# on this. But in short, FITS_rec now has a ._col_weakrefs attribute
# which is a WeakSet of weakrefs to each Column in _coldefs.
#
# So whenever ._coldefs is set we also add each Column in the ColDefs
# to the weakrefs set. This is an easy way to find out if a Column has
# any references to it external to the FITS_rec (i.e. a user assigned a
# column to a variable). If the column is still in _col_weakrefs then
# there are other references to it external to this FITS_rec. We use
# that information in __del__ to save off copies of the array data
# for those columns to their Column.array property before our memory
# is freed.
return self.__dict__.get('_coldefs')
@_coldefs.setter
def _coldefs(self, cols):
self.__dict__['_coldefs'] = cols
if isinstance(cols, ColDefs):
for col in cols.columns:
self._col_weakrefs.add(col)
@_coldefs.deleter
def _coldefs(self):
try:
del self.__dict__['_coldefs']
except KeyError as exc:
raise AttributeError(exc.args[0])
def __del__(self):
try:
del self._coldefs
if self.dtype.fields is not None:
for col in self._col_weakrefs:
if col.array is not None:
col.array = col.array.copy()
# See issues #4690 and #4912
except (AttributeError, TypeError): # pragma: no cover
pass
@property
def names(self):
"""List of column names."""
if self.dtype.fields:
return list(self.dtype.names)
elif getattr(self, '_coldefs', None) is not None:
return self._coldefs.names
else:
return None
@property
def formats(self):
"""List of column FITS formats."""
if getattr(self, '_coldefs', None) is not None:
return self._coldefs.formats
return None
@property
def _raw_itemsize(self):
"""
Returns the size of row items that would be written to the raw FITS
file, taking into account the possibility of unicode columns being
compactified.
Currently for internal use only.
"""
if _has_unicode_fields(self):
total_itemsize = 0
for field in self.dtype.fields.values():
itemsize = field[0].itemsize
if field[0].kind == 'U':
itemsize = itemsize // 4
total_itemsize += itemsize
return total_itemsize
else:
# Just return the normal itemsize
return self.itemsize
def field(self, key):
"""
A view of a `Column`'s data as an array.
"""
# NOTE: The *column* index may not be the same as the field index in
# the recarray, if the column is a phantom column
column = self.columns[key]
name = column.name
format = column.format
if format.dtype.itemsize == 0:
warnings.warn(
'Field {!r} has a repeat count of 0 in its format code, '
'indicating an empty field.'.format(key))
return np.array([], dtype=format.dtype)
# If field's base is a FITS_rec, we can run into trouble because it
# contains a reference to the ._coldefs object of the original data;
# this can lead to a circular reference; see ticket #49
base = self
while (isinstance(base, FITS_rec) and
isinstance(base.base, np.recarray)):
base = base.base
# base could still be a FITS_rec in some cases, so take care to
# use rec.recarray.field to avoid a potential infinite
# recursion
field = _get_recarray_field(base, name)
if name not in self._converted:
recformat = format.recformat
# TODO: If we're now passing the column to these subroutines, do we
# really need to pass them the recformat?
if isinstance(recformat, _FormatP):
# for P format
converted = self._convert_p(column, field, recformat)
else:
# Handle all other column data types which are fixed-width
# fields
converted = self._convert_other(column, field, recformat)
# Note: Never assign values directly into the self._converted dict;
# always go through self._cache_field; this way self._converted is
# only used to store arrays that are not already direct views of
# our own data.
self._cache_field(name, converted)
return converted
return self._converted[name]
def _cache_field(self, name, field):
"""
Do not store fields in _converted if one of its bases is self,
or if it has a common base with self.
This results in a reference cycle that cannot be broken since
ndarrays do not participate in cyclic garbage collection.
"""
base = field
while True:
self_base = self
while True:
if self_base is base:
return
if getattr(self_base, 'base', None) is not None:
self_base = self_base.base
else:
break
if getattr(base, 'base', None) is not None:
base = base.base
else:
break
self._converted[name] = field
def _update_column_attribute_changed(self, column, idx, attr, old_value,
new_value):
"""
Update how the data is formatted depending on changes to column
attributes initiated by the user through the `Column` interface.
Dispatches column attribute change notifications to individual methods
for each attribute ``_update_column_<attr>``
"""
method_name = '_update_column_{0}'.format(attr)
if hasattr(self, method_name):
# Right now this is so we can be lazy and not implement updaters
# for every attribute yet--some we may not need at all, TBD
getattr(self, method_name)(column, idx, old_value, new_value)
def _update_column_name(self, column, idx, old_name, name):
"""Update the dtype field names when a column name is changed."""
dtype = self.dtype
# Updating the names on the dtype should suffice
dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1:]
def _convert_x(self, field, recformat):
"""Convert a raw table column to a bit array as specified by the
FITS X format.
"""
dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_)
_unwrapx(field, dummy, recformat.repeat)
return dummy
def _convert_p(self, column, field, recformat):
"""Convert a raw table column of FITS P or Q format descriptors
to a VLA column with the array data returned from the heap.
"""
dummy = _VLF([None] * len(self), dtype=recformat.dtype)
raw_data = self._get_raw_data()
if raw_data is None:
raise OSError(
"Could not find heap data for the {!r} variable-length "
"array column.".format(column.name))
for idx in range(len(self)):
offset = field[idx, 1] + self._heapoffset
count = field[idx, 0]
if recformat.dtype == 'a':
dt = np.dtype(recformat.dtype + str(1))
arr_len = count * dt.itemsize
da = raw_data[offset:offset + arr_len].view(dt)
da = np.char.array(da.view(dtype=dt), itemsize=count)
dummy[idx] = decode_ascii(da)
else:
dt = np.dtype(recformat.dtype)
arr_len = count * dt.itemsize
dummy[idx] = raw_data[offset:offset + arr_len].view(dt)
dummy[idx].dtype = dummy[idx].dtype.newbyteorder('>')
# Each array in the field may now require additional
# scaling depending on the other scaling parameters
# TODO: The same scaling parameters apply to every
# array in the column so this is currently very slow; we
# really only need to check once whether any scaling will
# be necessary and skip this step if not
# TODO: Test that this works for X format; I don't think
# that it does--the recformat variable only applies to the P
# format not the X format
dummy[idx] = self._convert_other(column, dummy[idx],
recformat)
return dummy
def _convert_ascii(self, column, field):
"""
Special handling for ASCII table columns to convert columns containing
numeric types to actual numeric arrays from the string representation.
"""
format = column.format
recformat = ASCII2NUMPY[format[0]]
# if the string = TNULL, return ASCIITNULL
nullval = str(column.null).strip().encode('ascii')
if len(nullval) > format.width:
nullval = nullval[:format.width]
# Before using .replace make sure that any trailing bytes in each
# column are filled with spaces, and *not*, say, nulls; this causes
# functions like replace to potentially leave gibberish bytes in the
# array buffer.
dummy = np.char.ljust(field, format.width)
dummy = np.char.replace(dummy, encode_ascii('D'), encode_ascii('E'))
null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width))
# Convert all fields equal to the TNULL value (nullval) to empty fields.
# TODO: These fields really should be conerted to NaN or something else undefined.
# Currently they are converted to empty fields, which are then set to zero.
dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy)
# always replace empty fields, see https://github.com/astropy/astropy/pull/5394
if nullval != b'':
dummy = np.where(np.char.strip(dummy) == b'', null_fill, dummy)
try:
dummy = np.array(dummy, dtype=recformat)
except ValueError as exc:
indx = self.names.index(column.name)
raise ValueError(
'{}; the header may be missing the necessary TNULL{} '
'keyword or the table contains invalid data'.format(
exc, indx + 1))
return dummy
def _convert_other(self, column, field, recformat):
"""Perform conversions on any other fixed-width column data types.
This may not perform any conversion at all if it's not necessary, in
which case the original column array is returned.
"""
if isinstance(recformat, _FormatX):
# special handling for the X format
return self._convert_x(field, recformat)
(_str, _bool, _number, _scale, _zero, bscale, bzero, dim) = \
self._get_scale_factors(column)
indx = self.names.index(column.name)
# ASCII table, convert strings to numbers
# TODO:
# For now, check that these are ASCII columns by checking the coldefs
# type; in the future all columns (for binary tables, ASCII tables, or
# otherwise) should "know" what type they are already and how to handle
# converting their data from FITS format to native format and vice
# versa...
if not _str and isinstance(self._coldefs, _AsciiColDefs):
field = self._convert_ascii(column, field)
# Test that the dimensions given in dim are sensible; otherwise
# display a warning and ignore them
if dim:
# See if the dimensions already match, if not, make sure the
# number items will fit in the specified dimensions
if field.ndim > 1:
actual_shape = field.shape[1:]
if _str:
actual_shape = actual_shape + (field.itemsize,)
else:
actual_shape = field.shape[0]
if dim == actual_shape:
# The array already has the correct dimensions, so we
# ignore dim and don't convert
dim = None
else:
nitems = reduce(operator.mul, dim)
if _str:
actual_nitems = field.itemsize
elif len(field.shape) == 1: # No repeat count in TFORMn, equivalent to 1
actual_nitems = 1
else:
actual_nitems = field.shape[1]
if nitems > actual_nitems:
warnings.warn(
'TDIM{} value {:d} does not fit with the size of '
'the array items ({:d}). TDIM{:d} will be ignored.'
.format(indx + 1, self._coldefs[indx].dims,
actual_nitems, indx + 1))
dim = None
# further conversion for both ASCII and binary tables
# For now we've made columns responsible for *knowing* whether their
# data has been scaled, but we make the FITS_rec class responsible for
# actually doing the scaling
# TODO: This also needs to be fixed in the effort to make Columns
# responsible for scaling their arrays to/from FITS native values
if not column.ascii and column.format.p_format:
format_code = column.format.p_format
else:
# TODO: Rather than having this if/else it might be nice if the
# ColumnFormat class had an attribute guaranteed to give the format
# of actual values in a column regardless of whether the true
# format is something like P or Q
format_code = column.format.format
if (_number and (_scale or _zero) and not column._physical_values):
# This is to handle pseudo unsigned ints in table columns
# TODO: For now this only really works correctly for binary tables
# Should it work for ASCII tables as well?
if self._uint:
if bzero == 2**15 and format_code == 'I':
field = np.array(field, dtype=np.uint16)
elif bzero == 2**31 and format_code == 'J':
field = np.array(field, dtype=np.uint32)
elif bzero == 2**63 and format_code == 'K':
field = np.array(field, dtype=np.uint64)
bzero64 = np.uint64(2 ** 63)
else:
field = np.array(field, dtype=np.float64)
else:
field = np.array(field, dtype=np.float64)
if _scale:
np.multiply(field, bscale, field)
if _zero:
if self._uint and format_code == 'K':
# There is a chance of overflow, so be careful
test_overflow = field.copy()
try:
test_overflow += bzero64
except OverflowError:
warnings.warn(
"Overflow detected while applying TZERO{0:d}. "
"Returning unscaled data.".format(indx + 1))
else:
field = test_overflow
else:
field += bzero
# mark the column as scaled
column._physical_values = True
elif _bool and field.dtype != bool:
field = np.equal(field, ord('T'))
elif _str:
if not self._character_as_bytes:
with suppress(UnicodeDecodeError):
field = decode_ascii(field)
if dim:
# Apply the new field item dimensions
nitems = reduce(operator.mul, dim)
if field.ndim > 1:
field = field[:, :nitems]
if _str:
fmt = field.dtype.char
dtype = ('|{}{}'.format(fmt, dim[-1]), dim[:-1])
field.dtype = dtype
else:
field.shape = (field.shape[0],) + dim
return field
def _get_heap_data(self):
"""
Returns a pointer into the table's raw data to its heap (if present).
This is returned as a numpy byte array.
"""
if self._heapsize:
raw_data = self._get_raw_data().view(np.ubyte)
heap_end = self._heapoffset + self._heapsize
return raw_data[self._heapoffset:heap_end]
else:
return np.array([], dtype=np.ubyte)
def _get_raw_data(self):
"""
Returns the base array of self that "raw data array" that is the
array in the format that it was first read from a file before it was
sliced or viewed as a different type in any way.
This is determined by walking through the bases until finding one that
has at least the same number of bytes as self, plus the heapsize. This
may be the immediate .base but is not always. This is used primarily
for variable-length array support which needs to be able to find the
heap (the raw data *may* be larger than nbytes + heapsize if it
contains a gap or padding).
May return ``None`` if no array resembling the "raw data" according to
the stated criteria can be found.
"""
raw_data_bytes = self.nbytes + self._heapsize
base = self
while hasattr(base, 'base') and base.base is not None:
base = base.base
if hasattr(base, 'nbytes') and base.nbytes >= raw_data_bytes:
return base
def _get_scale_factors(self, column):
"""Get all the scaling flags and factors for one column."""
# TODO: Maybe this should be a method/property on Column? Or maybe
# it's not really needed at all...
_str = column.format.format == 'A'
_bool = column.format.format == 'L'
_number = not (_bool or _str)
bscale = column.bscale
bzero = column.bzero
_scale = bscale not in ('', None, 1)
_zero = bzero not in ('', None, 0)
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
# column._dims gives a tuple, rather than column.dim which returns the
# original string format code from the FITS header...
dim = column._dims
return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim)
def _scale_back(self, update_heap_pointers=True):
"""
Update the parent array, using the (latest) scaled array.
If ``update_heap_pointers`` is `False`, this will leave all the heap
pointers in P/Q columns as they are verbatim--it only makes sense to do
this if there is already data on the heap and it can be guaranteed that
that data has not been modified, and there is not new data to add to
the heap. Currently this is only used as an optimization for
CompImageHDU that does its own handling of the heap.
"""
# Running total for the new heap size
heapsize = 0
for indx, name in enumerate(self.dtype.names):
column = self._coldefs[indx]
recformat = column.format.recformat
raw_field = _get_recarray_field(self, indx)
# add the location offset of the heap area for each
# variable length column
if isinstance(recformat, _FormatP):
# Irritatingly, this can return a different dtype than just
# doing np.dtype(recformat.dtype); but this returns the results
# that we want. For example if recformat.dtype is 'a' we want
# an array of characters.
dtype = np.array([], dtype=recformat.dtype).dtype
if update_heap_pointers and name in self._converted:
# The VLA has potentially been updated, so we need to
# update the array descriptors
raw_field[:] = 0 # reset
npts = [len(arr) for arr in self._converted[name]]
raw_field[:len(npts), 0] = npts
raw_field[1:, 1] = (np.add.accumulate(raw_field[:-1, 0]) *
dtype.itemsize)
raw_field[:, 1][:] += heapsize
heapsize += raw_field[:, 0].sum() * dtype.itemsize
# Even if this VLA has not been read or updated, we need to
# include the size of its constituent arrays in the heap size
# total
if isinstance(recformat, _FormatX) and name in self._converted:
_wrapx(self._converted[name], raw_field, recformat.repeat)
continue
_str, _bool, _number, _scale, _zero, bscale, bzero, _ = \
self._get_scale_factors(column)
field = self._converted.get(name, raw_field)
# conversion for both ASCII and binary tables
if _number or _str:
if _number and (_scale or _zero) and column._physical_values:
dummy = field.copy()
if _zero:
dummy -= bzero
if _scale:
dummy /= bscale
# This will set the raw values in the recarray back to
# their non-physical storage values, so the column should
# be mark is not scaled
column._physical_values = False
elif _str or isinstance(self._coldefs, _AsciiColDefs):
dummy = field
else:
continue
# ASCII table, convert numbers to strings
if isinstance(self._coldefs, _AsciiColDefs):
self._scale_back_ascii(indx, dummy, raw_field)
# binary table string column
elif isinstance(raw_field, chararray.chararray):
self._scale_back_strings(indx, dummy, raw_field)
# all other binary table columns
else:
if len(raw_field) and isinstance(raw_field[0],
np.integer):
dummy = np.around(dummy)
if raw_field.shape == dummy.shape:
raw_field[:] = dummy
else:
# Reshaping the data is necessary in cases where the
# TDIMn keyword was used to shape a column's entries
# into arrays
raw_field[:] = dummy.ravel().view(raw_field.dtype)
del dummy
# ASCII table does not have Boolean type
elif _bool and name in self._converted:
choices = (np.array([ord('F')], dtype=np.int8)[0],
np.array([ord('T')], dtype=np.int8)[0])
raw_field[:] = np.choose(field, choices)
# Store the updated heapsize
self._heapsize = heapsize
def _scale_back_strings(self, col_idx, input_field, output_field):
# There are a few possibilities this has to be able to handle properly
# The input_field, which comes from the _converted column is of dtype
# 'Un' so that elements read out of the array are normal str
# objects (i.e. unicode strings)
#
# At the other end the *output_field* may also be of type 'S' or of
# type 'U'. It will *usually* be of type 'S' because when reading
# an existing FITS table the raw data is just ASCII strings, and
# represented in Numpy as an S array. However, when a user creates
# a new table from scratch, they *might* pass in a column containing
# unicode strings (dtype 'U'). Therefore the output_field of the
# raw array is actually a unicode array. But we still want to make
# sure the data is encodable as ASCII. Later when we write out the
# array we use, in the dtype 'U' case, a different write routine
# that writes row by row and encodes any 'U' columns to ASCII.
# If the output_field is non-ASCII we will worry about ASCII encoding
# later when writing; otherwise we can do it right here
if input_field.dtype.kind == 'U' and output_field.dtype.kind == 'S':
try:
_ascii_encode(input_field, out=output_field)
except _UnicodeArrayEncodeError as exc:
raise ValueError(
"Could not save column '{0}': Contains characters that "
"cannot be encoded as ASCII as required by FITS, starting "
"at the index {1!r} of the column, and the index {2} of "
"the string at that location.".format(
self._coldefs[col_idx].name,
exc.index[0] if len(exc.index) == 1 else exc.index,
exc.start))
else:
# Otherwise go ahead and do a direct copy into--if both are type
# 'U' we'll handle encoding later
input_field = input_field.flatten().view(output_field.dtype)
output_field.flat[:] = input_field
# Ensure that blanks at the end of each string are
# converted to nulls instead of spaces, see Trac #15
# and #111
_rstrip_inplace(output_field)
def _scale_back_ascii(self, col_idx, input_field, output_field):
"""
Convert internal array values back to ASCII table representation.
The ``input_field`` is the internal representation of the values, and
the ``output_field`` is the character array representing the ASCII
output that will be written.
"""
starts = self._coldefs.starts[:]
spans = self._coldefs.spans
format = self._coldefs[col_idx].format
# The the index of the "end" column of the record, beyond
# which we can't write
end = super().field(-1).itemsize
starts.append(end + starts[-1])
if col_idx > 0:
lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1]
else:
lead = 0
if lead < 0:
warnings.warn('Column {!r} starting point overlaps the previous '
'column.'.format(col_idx + 1))
trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx]
if trail < 0:
warnings.warn('Column {!r} ending point overlaps the next '
'column.'.format(col_idx + 1))
# TODO: It would be nice if these string column formatting
# details were left to a specialized class, as is the case
# with FormatX and FormatP
if 'A' in format:
_pc = '{:'
else:
_pc = '{:>'
fmt = ''.join([_pc, format[1:], ASCII2STR[format[0]], '}',
(' ' * trail)])
# Even if the format precision is 0, we should output a decimal point
# as long as there is space to do so--not including a decimal point in
# a float value is discouraged by the FITS Standard
trailing_decimal = (format.precision == 0 and
format.format in ('F', 'E', 'D'))
# not using numarray.strings's num2char because the
# result is not allowed to expand (as C/Python does).
for jdx, value in enumerate(input_field):
value = fmt.format(value)
if len(value) > starts[col_idx + 1] - starts[col_idx]:
raise ValueError(
"Value {!r} does not fit into the output's itemsize of "
"{}.".format(value, spans[col_idx]))
if trailing_decimal and value[0] == ' ':
# We have some extra space in the field for the trailing
# decimal point
value = value[1:] + '.'
output_field[jdx] = value
# Replace exponent separator in floating point numbers
if 'D' in format:
output_field[:] = output_field.replace(b'E', b'D')
def _get_recarray_field(array, key):
"""
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects.
"""
# Numpy >= 1.10.dev recarray no longer returns chararrays for strings
# This is currently needed for backwards-compatibility and for
# automatic truncation of trailing whitespace
field = np.recarray.field(array, key)
if (field.dtype.char in ('S', 'U') and
not isinstance(field, chararray.chararray)):
field = field.view(chararray.chararray)
return field
class _UnicodeArrayEncodeError(UnicodeEncodeError):
def __init__(self, encoding, object_, start, end, reason, index):
super().__init__(encoding, object_, start, end, reason)
self.index = index
def _ascii_encode(inarray, out=None):
"""
Takes a unicode array and fills the output string array with the ASCII
encodings (if possible) of the elements of the input array. The two arrays
must be the same size (though not necessarily the same shape).
This is like an inplace version of `np.char.encode` though simpler since
it's only limited to ASCII, and hence the size of each character is
guaranteed to be 1 byte.
If any strings are non-ASCII an UnicodeArrayEncodeError is raised--this is
just a `UnicodeEncodeError` with an additional attribute for the index of
the item that couldn't be encoded.
"""
out_dtype = np.dtype(('S{0}'.format(inarray.dtype.itemsize // 4),
inarray.dtype.shape))
if out is not None:
out = out.view(out_dtype)
op_dtypes = [inarray.dtype, out_dtype]
op_flags = [['readonly'], ['writeonly', 'allocate']]
it = np.nditer([inarray, out], op_dtypes=op_dtypes,
op_flags=op_flags, flags=['zerosize_ok'])
try:
for initem, outitem in it:
outitem[...] = initem.item().encode('ascii')
except UnicodeEncodeError as exc:
index = np.unravel_index(it.iterindex, inarray.shape)
raise _UnicodeArrayEncodeError(*(exc.args + (index,)))
return it.operands[1]
def _has_unicode_fields(array):
"""
Returns True if any fields in a structured array have Unicode dtype.
"""
dtypes = (d[0] for d in array.dtype.fields.values())
return any(d.kind == 'U' for d in dtypes)
|
{
"content_hash": "191145ed624343e7c256e17c7cbefdad",
"timestamp": "",
"source": "github",
"line_count": 1336,
"max_line_length": 90,
"avg_line_length": 39.5247005988024,
"alnum_prop": 0.5557996401855885,
"repo_name": "DougBurke/astropy",
"id": "3e1bceb98aa9add0e71ede0488d47594df5e19ba",
"size": "52869",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/io/fits/fitsrec.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "367279"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Python",
"bytes": "8390850"
},
{
"name": "TeX",
"bytes": "805"
}
],
"symlink_target": ""
}
|
"""hidden hosts management
Revision ID: 4445080944ee
Revises: 695dcbd29d4f
Create Date: 2018-10-03 11:47:20.028686
"""
# revision identifiers, used by Alembic.
revision = "4445080944ee"
down_revision = "695dcbd29d4f"
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"hidden",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("user", sa.String(length=256), nullable=False),
sa.Column("client", sa.String(length=4096), nullable=True),
sa.Column("server", sa.String(length=4096), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("hidden")
# ### end Alembic commands ###
|
{
"content_hash": "f09a72c2c30c89eb6fbded61ae2b7fec",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 67,
"avg_line_length": 26.515151515151516,
"alnum_prop": 0.6525714285714286,
"repo_name": "ziirish/burp-ui",
"id": "f8b4fddd369fed68ea399519307e0646f0ee58d1",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/4445080944ee_hidden_hosts_management.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7308"
},
{
"name": "Dockerfile",
"bytes": "7163"
},
{
"name": "HTML",
"bytes": "166600"
},
{
"name": "JavaScript",
"bytes": "176986"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1067898"
},
{
"name": "Shell",
"bytes": "39490"
}
],
"symlink_target": ""
}
|
import os
import unittest
from cloudfoundry_client.operations.push.validation.manifest import ManifestReader
class TestManifestReader(unittest.TestCase):
def test_empty_manifest_should_raise_exception(self):
manifest_file = os.path.join(os.path.dirname(__file__), "..", "..", "..", "fixtures", "operations", "manifest_empty.yml")
self.assertRaises(AssertionError, lambda: ManifestReader.load_application_manifests(manifest_file))
def test_manifest_should_be_read(self):
manifest_file = os.path.join(os.path.dirname(__file__), "..", "..", "..", "fixtures", "operations", "manifest.yml")
applications = ManifestReader.load_application_manifests(manifest_file)
self.assertEqual(1, len(applications))
self.assertEqual(
dict(
docker=dict(username="the-user", password="P@SsW0r$", image="some-image"),
name="the-name",
routes=[dict(route="first-route"), dict(route="second-route")],
),
applications[0],
)
def test_complex_manifest_should_be_read(self):
manifest_file = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "fixtures", "operations", "manifest_complex.yml"
)
applications = ManifestReader.load_application_manifests(manifest_file)
self.assertEqual(2, len(applications))
self.assertEqual(
dict(
name="bigapp",
buildpacks=["staticfile_buildpack"],
memory=1024,
path=os.path.abspath(os.path.join(os.path.dirname(manifest_file), "big")),
),
applications[0],
)
self.assertEqual(
dict(
name="smallapp",
buildpacks=["staticfile_buildpack"],
memory=256,
path=os.path.abspath(os.path.join(os.path.dirname(manifest_file), "small")),
),
applications[1],
)
def test_name_should_be_set(self):
manifest = dict(path="test/")
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
def test_application_should_declare_either_path_or_docker(self):
manifest = dict(name="the-name", docker=dict(), path="test/")
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
def test_application_should_declare_at_least_path_or_docker(self):
manifest = dict(name="the-name", routes=[], environment=dict())
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
def test_deprecated_entries_should_not_be_set(self):
for deprecated in ["host", "hosts", "domain", "domains", "no-hostname"]:
manifest = dict(name="the-name", path="test/")
manifest[deprecated] = "some-value"
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
def test_docker_manifest_should_declare_buildpack_or_image(self):
manifest = dict(name="the-name", docker=dict(image="some-image", buildpack="some-buildpack"))
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
def test_username_should_be_set_if_password_is(self):
manifest = dict(name="the-name", docker=dict(image="some-image", password="P@SsW0r$"))
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
def test_password_should_be_set_if_username_is(self):
manifest = dict(name="the-name", docker=dict(image="some-image", username="the-user"))
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
def test_username_and_password_are_set_when_image_is(self):
manifest = dict(name="the-name", docker=dict(username="the-user", password="P@SsW0r$"))
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
def test_routes_should_be_an_object_with_attribute(self):
manifest = dict(name="the-name", path="test/", routes=["a route"])
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
manifest = dict(name="the-name", path="test/", routes=[dict(invalid_attribute="any-value")])
self.assertRaises(AssertionError, lambda: ManifestReader._validate_application_manifest(".", manifest))
def test_valid_application_with_path_and_routes(self):
manifest = dict(name="the-name", path="test/", routes=[dict(route="first-route"), dict(route="second-route")])
ManifestReader._validate_application_manifest(".", manifest)
def test_valid_application_with_docker_and_routes(self):
manifest = dict(
docker=dict(username="the-user", password="P@SsW0r$", image="some-image"),
name="the-name",
routes=[dict(route="first-route"), dict(route="second-route")],
)
ManifestReader._validate_application_manifest(".", manifest)
def path_should_be_set_as_absolute(self):
manifest = dict(name="the-name", path="test/")
ManifestReader._validate_application_manifest(".", manifest)
self.assertEqual(os.path.abspath("test"), manifest["path"])
def test_memory_in_kb(self):
manifest = dict(memory="2048KB")
ManifestReader._convert_memory(manifest)
self.assertEqual(2, manifest["memory"])
def test_memory_in_mb(self):
manifest = dict(memory="2048MB")
ManifestReader._convert_memory(manifest)
self.assertEqual(2048, manifest["memory"])
def test_memory_in_gb(self):
manifest = dict(memory="1G")
ManifestReader._convert_memory(manifest)
self.assertEqual(1024, manifest["memory"])
|
{
"content_hash": "3c9db0028961baf97a17b37c2c96825c",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 129,
"avg_line_length": 49.733333333333334,
"alnum_prop": 0.6456099195710456,
"repo_name": "antechrestos/cf-python-client",
"id": "25995c3a171c2b816c9fccf0ca69b4138ea17cf6",
"size": "5968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/operations/push/validation/test_manifest_reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "595"
},
{
"name": "Python",
"bytes": "122987"
}
],
"symlink_target": ""
}
|
"""Tests for easydate"""
import unittest
from easydate import Datetime
from datetime import date, datetime, timedelta
base_date = Datetime(year=2014, month=1, day=1)
class EasydateTestCase(unittest.TestCase):
def setUp(self):
self.datetime = base_date
def tearDown(self):
pass
def test_object_compare(self):
# XXX : Should be compared with other object later
self.datetime == self.datetime
self.datetime != self.datetime
self.datetime > self.datetime
self.datetime < self.datetime
self.datetime <= self.datetime
self.datetime >= self.datetime
def test_datetime_converting(self):
pure_datetime = self.datetime.to_datetime
assert isinstance(pure_datetime, datetime)
def test_delta(self):
pass
def test_date_after(self):
days_after = self.datetime.date_after(days=5)
origin_delta = \
Datetime.from_datetime(self.datetime + timedelta(days=5))
assert days_after == origin_delta
def test_date_before(self):
days_before = self.datetime.date_before(days=5)
origin_delta = \
Datetime.from_datetime(self.datetime + timedelta(days=-5))
assert days_before == origin_delta
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "1f489ebfaa407da5e2b9920621dc7988",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 70,
"avg_line_length": 28.127659574468087,
"alnum_prop": 0.6391830559757943,
"repo_name": "daftshady/easydate",
"id": "94f53f704f9d9f6318be030f7db14946f53f9c9f",
"size": "1345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6083"
}
],
"symlink_target": ""
}
|
from django import template
from django.core.exceptions import ImproperlyConfigured
register = template.Library()
@register.simple_tag(takes_context=True)
def frontpage_statusbox(context):
if 'request' not in context:
raise ImproperlyConfigured('Enable the request context processor!')
request = context['request']
from frontpage.views import statusbox
return statusbox(request).content
|
{
"content_hash": "37db22a187415794b2e13aa686695cbe",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 31.846153846153847,
"alnum_prop": 0.7705314009661836,
"repo_name": "jokey2k/ShockGsite",
"id": "6880c7b77dc83761c4be069fe98f1c88df139a7a",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontpage/templatetags/frontpage_extras.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "84880"
},
{
"name": "Python",
"bytes": "262576"
}
],
"symlink_target": ""
}
|
import pytest
import capnp
import os
this_dir = os.path.dirname(__file__)
@pytest.fixture
def capability():
return capnp.load(os.path.join(this_dir, 'test_capability.capnp'))
class Server:
def __init__(self, val=1):
self.val = val
def foo_context(self, context):
extra = 0
if context.params.j:
extra = 1
context.results.x = str(context.params.i * 5 + extra + self.val)
def buz_context(self, context):
context.results.x = context.params.i.host + '_test'
class PipelineServer:
def getCap_context(self, context):
def _then(response):
context.results.s = response.x + '_foo'
context.results.outBox.cap = capability().TestInterface._new_server(Server(100))
return context.params.inCap.foo(i=context.params.n).then(_then)
def test_client_context(capability):
client = capability.TestInterface._new_client(Server())
req = client._request('foo')
req.i = 5
remote = req.send()
response = remote.wait()
assert response.x == '26'
req = client.foo_request()
req.i = 5
remote = req.send()
response = remote.wait()
assert response.x == '26'
with pytest.raises(AttributeError):
client.foo2_request()
req = client.foo_request()
with pytest.raises(Exception):
req.i = 'foo'
req = client.foo_request()
with pytest.raises(AttributeError):
req.baz = 1
def test_simple_client_context(capability):
client = capability.TestInterface._new_client(Server())
remote = client._send('foo', i=5)
response = remote.wait()
assert response.x == '26'
remote = client.foo(i=5)
response = remote.wait()
assert response.x == '26'
remote = client.foo(i=5, j=True)
response = remote.wait()
assert response.x == '27'
remote = client.foo(5)
response = remote.wait()
assert response.x == '26'
remote = client.foo(5, True)
response = remote.wait()
assert response.x == '27'
remote = client.foo(5, j=True)
response = remote.wait()
assert response.x == '27'
remote = client.buz(capability.TestSturdyRefHostId.new_message(host='localhost'))
response = remote.wait()
assert response.x == 'localhost_test'
with pytest.raises(Exception):
remote = client.foo(5, 10)
with pytest.raises(Exception):
remote = client.foo(5, True, 100)
with pytest.raises(Exception):
remote = client.foo(i='foo')
with pytest.raises(AttributeError):
remote = client.foo2(i=5)
with pytest.raises(Exception):
remote = client.foo(baz=5)
def test_pipeline_context(capability):
client = capability.TestPipeline._new_client(PipelineServer())
foo_client = capability.TestInterface._new_client(Server())
remote = client.getCap(n=5, inCap=foo_client)
outCap = remote.outBox.cap
pipelinePromise = outCap.foo(i=10)
response = pipelinePromise.wait()
assert response.x == '150'
response = remote.wait()
assert response.s == '26_foo'
class BadServer:
def __init__(self, val=1):
self.val = val
def foo_context(self, context):
context.results.x = str(context.params.i * 5 + self.val)
context.results.x2 = 5 # raises exception
def test_exception_client_context(capability):
client = capability.TestInterface._new_client(BadServer())
remote = client._send('foo', i=5)
with pytest.raises(capnp.KjException):
remote.wait()
class BadPipelineServer:
def getCap_context(self, context):
def _then(response):
context.results.s = response.x + '_foo'
context.results.outBox.cap = capability().TestInterface._new_server(Server(100))
def _error(error):
raise Exception('test was a success')
return context.params.inCap.foo(i=context.params.n).then(_then, _error)
def test_exception_chain_context(capability):
client = capability.TestPipeline._new_client(BadPipelineServer())
foo_client = capability.TestInterface._new_client(BadServer())
remote = client.getCap(n=5, inCap=foo_client)
try:
remote.wait()
except Exception as e:
assert 'test was a success' in str(e)
def test_pipeline_exception_context(capability):
client = capability.TestPipeline._new_client(BadPipelineServer())
foo_client = capability.TestInterface._new_client(BadServer())
remote = client.getCap(n=5, inCap=foo_client)
outCap = remote.outBox.cap
pipelinePromise = outCap.foo(i=10)
with pytest.raises(Exception):
loop.wait(pipelinePromise)
with pytest.raises(Exception):
remote.wait()
def test_casting_context(capability):
client = capability.TestExtends._new_client(Server())
client2 = client.upcast(capability.TestInterface)
client3 = client2.cast_as(capability.TestInterface)
with pytest.raises(Exception):
client.upcast(capability.TestPipeline)
class TailCallOrder:
def __init__(self):
self.count = -1
def getCallSequence_context(self, context):
self.count += 1
context.results.n = self.count
class TailCaller:
def __init__(self):
self.count = 0
def foo_context(self, context):
self.count += 1
tail = context.params.callee.foo_request(i=context.params.i, t='from TailCaller')
return context.tail_call(tail)
class TailCallee:
def __init__(self):
self.count = 0
def foo_context(self, context):
self.count += 1
results = context.results
results.i = context.params.i
results.t = context.params.t
results.c = capability().TestCallOrder._new_server(TailCallOrder())
def test_tail_call(capability):
callee_server = TailCallee()
caller_server = TailCaller()
callee = capability.TestTailCallee._new_client(callee_server)
caller = capability.TestTailCaller._new_client(caller_server)
promise = caller.foo(i=456, callee=callee)
dependent_call1 = promise.c.getCallSequence()
response = promise.wait()
assert response.i == 456
assert response.i == 456
dependent_call2 = response.c.getCallSequence()
dependent_call3 = response.c.getCallSequence()
result = dependent_call1.wait()
assert result.n == 0
result = dependent_call2.wait()
assert result.n == 1
result = dependent_call3.wait()
assert result.n == 2
assert callee_server.count == 1
assert caller_server.count == 1
|
{
"content_hash": "a4cdacdc0980d50ad812628e9b73a3b6",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 92,
"avg_line_length": 26.258064516129032,
"alnum_prop": 0.6491093366093366,
"repo_name": "rcrowder/pycapnp",
"id": "4f2c1113a6b156ad866b9f34d845eefd9fab3f82",
"size": "6512",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "test/test_capability_context.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1026"
},
{
"name": "C++",
"bytes": "18866"
},
{
"name": "Cap'n Proto",
"bytes": "27145"
},
{
"name": "Protocol Buffer",
"bytes": "6286"
},
{
"name": "Python",
"bytes": "354805"
},
{
"name": "Shell",
"bytes": "960"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Proposal'
db.create_table('proposals_proposal', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('conference', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.Conference'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(max_length=400)),
('abstract', self.gf('django.db.models.fields.TextField')()),
('speaker', self.gf('django.db.models.fields.related.ForeignKey')(related_name='proposals', to=orm['speakers.Speaker'])),
('submission_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.utcnow)),
('modified_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('kind', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.SessionKind'])),
('audience_level', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.AudienceLevel'])),
('duration', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.SessionDuration'])),
))
db.send_create_signal('proposals', ['Proposal'])
# Adding M2M table for field additional_speakers on 'Proposal'
db.create_table('proposals_proposal_additional_speakers', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('proposal', models.ForeignKey(orm['proposals.proposal'], null=False)),
('speaker', models.ForeignKey(orm['speakers.speaker'], null=False))
))
db.create_unique('proposals_proposal_additional_speakers', ['proposal_id', 'speaker_id'])
def backwards(self, orm):
# Deleting model 'Proposal'
db.delete_table('proposals_proposal')
# Removing M2M table for field additional_speakers on 'Proposal'
db.delete_table('proposals_proposal_additional_speakers')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'conference.audiencelevel': {
'Meta': {'object_name': 'AudienceLevel'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'conference.conference': {
'Meta': {'object_name': 'Conference'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'conference.sessionduration': {
'Meta': {'object_name': 'SessionDuration'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'minutes': ('django.db.models.fields.IntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'conference.sessionkind': {
'Meta': {'object_name': 'SessionKind'},
'closed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'proposals.proposal': {
'Meta': {'object_name': 'Proposal'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'proposal_participations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['speakers.Speaker']"}),
'audience_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.AudienceLevel']"}),
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
'duration': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionDuration']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionKind']"}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': "orm['speakers.Speaker']"}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'speakers.speaker': {
'Meta': {'object_name': 'Speaker'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'speaker_profile'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['proposals']
|
{
"content_hash": "40750a137a6e970d865be68f96e7d890",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 228,
"avg_line_length": 71.43795620437956,
"alnum_prop": 0.5721875957903341,
"repo_name": "EuroPython/djep",
"id": "14ba60dd1ccad69b1f1e18e2213c891068b141f8",
"size": "9805",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "pyconde/proposals/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "246835"
},
{
"name": "JavaScript",
"bytes": "112740"
},
{
"name": "Puppet",
"bytes": "2679"
},
{
"name": "Python",
"bytes": "1927106"
},
{
"name": "Ruby",
"bytes": "181"
},
{
"name": "Shell",
"bytes": "6515"
}
],
"symlink_target": ""
}
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class EnumerateTests(TranspileTestCase):
def test_enumerate(self):
self.assertCodeExecution("""
lst=['a','b','c','d','e']
print(list(enumerate(lst)))
lst=['a','b','c','d','e']
print(list(enumerate(lst,start=-40)))
lst=['a','b','c','d','e']
print(list(enumerate(lst,start=46)))
lst=[('a',4),'b','c',10,'e']
print(list(enumerate(lst)))
print(list(enumerate([])))
print(list(enumerate([], start=10)))
""")
def test_enumerate_invalid_start_args(self):
self.assertCodeExecution("""
try:
print(list(enumerate(['a','b','c'], start=None)))
except TypeError as err:
print(err)
try:
print(list(enumerate(['a','b','c'], start=1.5)))
except TypeError as err:
print(err)
try:
print(list(enumerate(['a','b','c'], start="start_string")))
except TypeError as err:
print(err)
""")
def test_enumerate_invalid_iterable(self):
self.assertCodeExecution("""
try:
num=10
print(list(enumerate(num, start=10)))
except TypeError as err:
print(err)
try:
print(list(enumerate()))
except TypeError as err:
print(err)
""")
class BuiltinEnumerateFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["enumerate"]
not_implemented = [
'test_bool',
'test_bytearray',
'test_bytes',
'test_class',
'test_complex',
'test_dict',
'test_float',
'test_frozenset',
'test_int',
'test_list',
'test_None',
'test_NotImplemented',
'test_range',
'test_set',
'test_slice',
'test_str',
'test_tuple',
]
|
{
"content_hash": "186db3f4676e4cfa28e34561acceb2f1",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 80,
"avg_line_length": 29.65714285714286,
"alnum_prop": 0.47928709055876684,
"repo_name": "pombredanne/voc",
"id": "ba7d61a28489b512e4805f5c343ae1a895813409",
"size": "2076",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/builtins/test_enumerate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "818744"
},
{
"name": "Python",
"bytes": "1004328"
}
],
"symlink_target": ""
}
|
import sys, os, glob, string, traceback, fnmatch, math, shutil, collections
from PIL import Image, PngImagePlugin
from .State import State
from byond.DMIH import *
import logging
class DMILoadFlags:
NoImages = 1
NoPostProcessing = 2
class DMI:
MovementTag = '\t'
def __init__(self, filename):
self.filename = filename
self.version = ''
self.states = collections.OrderedDict() # {}
self.icon_width = 32
self.icon_height = 32
self.pixels = None
self.size = ()
self.statelist = 'LOLNONE'
self.max_x = -1
self.max_y = -1
self.img = None
def make(self, makefile):
print('>>> Compiling %s -> %s' % (makefile, self.filename))
h = DMIH()
h.parse(makefile)
for node in h.tokens:
if type(node) is Variable:
if node.name == 'height':
self.icon_height = node.value
elif node.name == 'weight':
self.icon_width = node.value
elif type(node) is directives.State:
self.states[node.state.key()] = node.state
elif type(node) is directives.Import:
if node.ftype == 'dmi':
dmi = DMI(node.filedef)
dmi.extractTo("_tmp/" + os.path.basename(node.filedef))
for name in dmi.states:
self.states[name] = dmi.states[name]
def save(self, to, **kwargs):
if len(self.states) == 0:
return # Nope.
# Now build the manifest
manifest = '#BEGIN DMI'
manifest += '\nversion = 4.0'
manifest += '\n\twidth = {0}'.format(self.icon_width)
manifest += '\n\theight = {0}'.format(self.icon_height)
frames = []
fdata = []
# Sort by name because I'm autistic like that.
ordered = self.states
if kwargs.get('sort', True):
ordered = sorted(self.states.keys())
for name in ordered:
if len(self.states[name].icons) > 0:
manifest += self.states[name].genManifest()
numIcons = self.states[name].numIcons()
lenIcons = len(self.states[name].icons)
if numIcons != lenIcons:
logging.warn('numIcons={0}, len(icons)={1} in state {2}!'.format(numIcons, lenIcons, name))
# frames += self.states[name].icons
# frames.extend(self.states[name].icons)
for i in range(len(self.states[name].icons)):
fdata += ['{}[{}]'.format(self.states[name].name, i)]
frames += [self.states[name].icons[i]]
else:
logging.warn('State {0} has 0 icons.'.format(name))
manifest += '\n#END DMI'
# print(manifest)
# Next bit borrowed from DMIDE.
icons_per_row = math.ceil(math.sqrt(len(frames)))
rows = icons_per_row
if len(frames) > icons_per_row * rows:
rows += 1
sheet = Image.new('RGBA', (int((icons_per_row + 1) * self.icon_width), int(rows * self.icon_height)))
x = 0
y = 0
# for frame in frames:
# print('per_row={0}, rows={1}, size={2}'.format(icons_per_row,rows,sheet.size))
for f in range(len(frames)):
frame = frames[f]
icon = frame
if isinstance(frame, str):
icon = Image.open(frame, 'r')
box = (x * self.icon_width, y * self.icon_height)
# print('{0} -> ({1},{2}) {3} {4}'.format(f,x,y,box,fdata[f]))
sheet.paste(icon, box, icon)
x += 1
if x > icons_per_row:
y += 1
x = 0
# More borrowed from DMIDE:
# undocumented class
meta = PngImagePlugin.PngInfo()
# copy metadata into new object
reserved = ('interlace', 'gamma', 'dpi', 'transparency', 'aspect')
for k, v in sheet.info.items():
if k in reserved: continue
meta.add_text(k, v, 1)
# Only need one - Rob
meta.add_text(b'Description', manifest.encode('ascii'), 1)
# and save
sheet.save(to, 'PNG', pnginfo=meta)
# with open(to+'.txt','w') as f:
# f.write(manifest)
# logging.info('>>> {0} states saved to {1}'.format(len(frames), to))
def getDMIH(self):
o = '# DMI Header 1.0 - Generated by DMI.py'
o += self.genDMIHLine('width', self.icon_width, -1)
o += self.genDMIHLine('height', self.icon_height, -1)
for s in sorted(self.states):
o += self.states[s].genDMIH()
return o
def genDMIHLine(self, name, value, default):
if value != default:
if type(value) is list:
value = ','.join(value)
return '\n{0} = {1}'.format(name, value)
return ''
def extractTo(self, dest, suppress_post_process=False):
flags = 0
if(suppress_post_process):
flags |= DMILoadFlags.NoPostProcessing
# print('>>> Loading %s...' % self.filename)
self.loadAll(flags)
# print('>>> Extracting %s...' % self.filename)
self.extractAllStates(dest, flags)
def getFrame(self, state, direction, frame, movement=False):
state = State.MakeKey(state,movement=movement)
if state not in self.states:
return None
return self.states[state].getFrame(direction, frame)
def setFrame(self, state, direction, frame, img, movement=False):
state = State.MakeKey(state,movement=movement)
if state not in self.states:
self.states[state] = State(state)
return self.states[state].setFrame(direction, frame, img)
def getHeader(self):
img = Image.open(self.filename)
if(b'Description' not in img.info):
raise Exception("DMI Description is not in the information headers!")
return img.info[b'Description'].decode('ascii')
def setHeader(self, newHeader, dest):
img = Image.open(self.filename)
# More borrowed from DMIDE:
# undocumented class
meta = PngImagePlugin.PngInfo()
# copy metadata into new object
reserved = ('interlace', 'gamma', 'dpi', 'transparency', 'aspect', 'icc_profile')
for k, v in img.info.items():
if k in reserved: continue
# print(k, v)
meta.add_text(k, v, 1)
# Only need one - Rob
meta.add_text(b'Description', newHeader.encode('ascii'), 1)
# and save
img.save(dest + '.tmp', 'PNG', pnginfo=meta)
shutil.move(dest + '.tmp', dest)
def loadMetadata(self, flags=0):
self.load(flags | DMILoadFlags.NoImages)
def loadAll(self, flags=0):
self.load(flags)
def load(self, flags):
self.img = Image.open(self.filename)
# This is a stupid hack to work around BYOND generating indexed PNGs with unspecified transparency.
# Uncorrected, this will result in PIL(low) trying to read the colors as alpha.
if self.img.mode == 'P':
# If there's no transparency, set it to black.
if 'transparency' not in self.img.info:
logging.warn('({0}): Indexed PNG does not specify transparency! Setting black as transparency. self.img.info = {1}'.format(self.filename, repr(self.img.info)))
self.img.info['transparency'] = 0
# Always use RGBA, it causes less problems.
self.img = self.img.convert('RGBA')
self.size = self.img.size
# Sanity
if(b'Description' not in self.img.info):
raise Exception("DMI Description is not in the information headers!")
# Load pixels from image
self.pixels = self.img.load()
# Load DMI header
desc = self.img.info[b'Description'].decode('ascii')
"""
version = 4.0
width = 32
height = 32
state = "fire"
dirs = 4
frames = 1
state = "fire2"
dirs = 1
frames = 1
state = "void"
dirs = 4
frames = 4
delay = 2,2,2,2
state = "void2"
dirs = 1
frames = 4
delay = 2,2,2,2
"""
state = None
x = 0
y = 0
self.statelist = desc
ii = 0
for line in desc.split("\n"):
line = line.strip()
if line.startswith("#"):
continue
if '=' in line:
(key, value) = line.split(' = ')
key = key.strip()
value = value.strip().replace('"', '')
if key == 'version':
self.version = value
elif key == 'width':
self.icon_width = int(value)
self.max_x = self.img.size[0] / self.icon_width
elif key == 'height':
self.icon_height = int(value)
self.max_y = self.img.size[1] / self.icon_height
# print(('%s: {sz: %s,h: %d, w: %d, m_x: %d, m_y: %d}'%(self.filename,repr(img.size),self.icon_height,self.icon_width,self.max_x,self.max_y)))
elif key == 'state':
if state != None:
# print(" + %s" % (state.ToString()))
if(self.icon_width == 0 or self.icon_height == 0):
if(len(self.states) > 0):
raise SystemError("Width and height for each cell are not available.")
else:
self.icon_width = self.img.size[0]
self.max_x = 1
self.icon_height = self.img.size[1]
self.max_y = 1
elif(self.max_x == -1 or self.max_y == -1):
self.max_x = self.img.size[0] / self.icon_width
self.max_y = self.img.size[1] / self.icon_width
for _ in range(state.numIcons()):
state.positions += [(x, y)]
if (flags & DMILoadFlags.NoImages) == 0:
state.icons += [self.loadIconAt(x, y)]
x += 1
# print('%s[%d:%d] x=%d, max_x=%d' % (self.filename,ii,i,x,self.max_x))
if(x >= self.max_x):
x = 0
y += 1
self.states[state.key()] = state
# if not suppress_post_process:
# self.states[state.name].postProcess()
ii += 1
state = State(value)
elif key == 'dirs':
state.dirs = int(value)
elif key == 'frames':
state.frames = int(value)
elif key == 'loop':
state.loop = int(value)
elif key == 'rewind':
state.rewind = int(value)
elif key == 'movement':
state.movement = int(value)
elif key == 'delay':
state.delay = value.split(',')
elif key == 'hotspot':
state.hotspot = value
else:
logging.critical('Unknown key ' + key + ' (value=' + value + ')!')
sys.exit()
self.states[state.name] = state
for _ in range(state.numIcons()):
self.states[state.name].icons += [self.loadIconAt(x, y)]
x += 1
if(x >= self.max_x):
x = 0
y += 1
def extractAllStates(self, dest, flags=0):
for _, state in self.states.iteritems():
# state = State()
for i in xrange(len(state.positions)):
x, y = state.positions[i]
self.extractIconAt(state, dest, x, y, i)
if (flags & DMILoadFlags.NoPostProcessing) == 0:
self.states[state.name].postProcess()
if dest is not None:
outfolder = os.path.join(dest, os.path.basename(self.filename))
nfn = self.filename.replace('.dmi', '.dmih')
valid_chars = "-_.()[] %s%s" % (string.ascii_letters, string.digits)
nfn = ''.join(c for c in nfn if c in valid_chars)
nfn = os.path.join(outfolder, nfn)
with open(nfn, 'w') as dmih:
dmih.write(self.getDMIH())
def loadIconAt(self, sx, sy):
if(self.icon_width == 0 or self.icon_height == 0):
raise SystemError('Image is {}x{}, an invalid size.'.format(self.icon_height, self.icon_width))
# print(" X (%d,%d)"%(sx*self.icon_width,sy*self.icon_height))
icon = Image.new(self.img.mode, (self.icon_width, self.icon_height))
newpix = icon.load()
for y in range(self.icon_height):
for x in range(self.icon_width):
_x = x + (sx * self.icon_width)
_y = y + (sy * self.icon_height)
try:
pixel = self.pixels[_x, _y]
if pixel[3] == 0: continue
newpix[x, y] = pixel
except IndexError:
print("!!! Received IndexError in %s <%d,%d> = <%d,%d> + (<%d,%d> * <%d,%d>), max=<%d,%d> halting." % (self.filename, _x, _y, x, y, sx, sy, self.icon_width, self.icon_height, self.max_x, self.max_y))
print('%s: {sz: %s,h: %d, w: %d, m_x: %d, m_y: %d}' % (self.filename, repr(self.img.size), self.icon_height, self.icon_width, self.max_x, self.max_y))
print('# of cells: %d' % len(self.states))
print('Image h/w: %s' % repr(self.size))
print('--STATES:--')
print(self.statelist)
sys.exit(1)
return icon
def extractIconAt(self, state, dest, sx, sy, i=0):
icon = self.loadIconAt(sx, sy)
outfolder = os.path.join(dest, os.path.basename(self.filename))
if not os.path.isdir(outfolder):
os.makedirs(outfolder)
nfn = "{}[{}].png".format(state.name, i)
valid_chars = "-_.()[] %s%s" % (string.ascii_letters, string.digits)
nfn = ''.join(c for c in nfn if c in valid_chars)
nfn = os.path.join(outfolder, nfn)
if os.path.isfile(nfn):
os.remove(nfn)
try:
icon.save(nfn)
except SystemError as e:
print("Received SystemError, halting: %s" % traceback.format_exc(e))
print('{ih=%d,iw=%d,state=%s,dest=%s,sx=%d,sy=%d,i=%d}' % (self.icon_height, self.icon_width, state.ToString(), dest, sx, sy, i))
sys.exit(1)
return nfn
|
{
"content_hash": "18ebdb260fade7d40cc1ffbd31d6fad1",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 219,
"avg_line_length": 40.68783068783069,
"alnum_prop": 0.48185955786736023,
"repo_name": "Boggart/ByondTools",
"id": "2fd68007e1d2ccca44a39485d4e6cbd599afc1cc",
"size": "15381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byond/DMI/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DM",
"bytes": "6575"
},
{
"name": "Python",
"bytes": "292003"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.condenser_equipment_and_heat_exchangers import FluidCoolerSingleSpeed
log = logging.getLogger(__name__)
class TestFluidCoolerSingleSpeed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_fluidcoolersinglespeed(self):
pyidf.validation_level = ValidationLevel.error
obj = FluidCoolerSingleSpeed()
# alpha
var_name = "Name"
obj.name = var_name
# node
var_water_inlet_node_name = "node|Water Inlet Node Name"
obj.water_inlet_node_name = var_water_inlet_node_name
# node
var_water_outlet_node_name = "node|Water Outlet Node Name"
obj.water_outlet_node_name = var_water_outlet_node_name
# alpha
var_performance_input_method = "UFactorTimesAreaAndDesignWaterFlowRate"
obj.performance_input_method = var_performance_input_method
# real
var_design_air_flow_rate_ufactor_times_area_value = 1050000.00005
obj.design_air_flow_rate_ufactor_times_area_value = var_design_air_flow_rate_ufactor_times_area_value
# real
var_nominal_capacity = 0.0001
obj.nominal_capacity = var_nominal_capacity
# real
var_design_entering_water_temperature = 0.0001
obj.design_entering_water_temperature = var_design_entering_water_temperature
# real
var_design_entering_air_temperature = 0.0001
obj.design_entering_air_temperature = var_design_entering_air_temperature
# real
var_design_entering_air_wetbulb_temperature = 0.0001
obj.design_entering_air_wetbulb_temperature = var_design_entering_air_wetbulb_temperature
# real
var_design_water_flow_rate = 0.0001
obj.design_water_flow_rate = var_design_water_flow_rate
# real
var_design_air_flow_rate = 0.0001
obj.design_air_flow_rate = var_design_air_flow_rate
# real
var_design_air_flow_rate_fan_power = 0.0001
obj.design_air_flow_rate_fan_power = var_design_air_flow_rate_fan_power
# node
var_outdoor_air_inlet_node_name = "node|Outdoor Air Inlet Node Name"
obj.outdoor_air_inlet_node_name = var_outdoor_air_inlet_node_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.fluidcoolersinglespeeds[0].name, var_name)
self.assertEqual(idf2.fluidcoolersinglespeeds[0].water_inlet_node_name, var_water_inlet_node_name)
self.assertEqual(idf2.fluidcoolersinglespeeds[0].water_outlet_node_name, var_water_outlet_node_name)
self.assertEqual(idf2.fluidcoolersinglespeeds[0].performance_input_method, var_performance_input_method)
self.assertAlmostEqual(idf2.fluidcoolersinglespeeds[0].design_air_flow_rate_ufactor_times_area_value, var_design_air_flow_rate_ufactor_times_area_value)
self.assertAlmostEqual(idf2.fluidcoolersinglespeeds[0].nominal_capacity, var_nominal_capacity)
self.assertAlmostEqual(idf2.fluidcoolersinglespeeds[0].design_entering_water_temperature, var_design_entering_water_temperature)
self.assertAlmostEqual(idf2.fluidcoolersinglespeeds[0].design_entering_air_temperature, var_design_entering_air_temperature)
self.assertAlmostEqual(idf2.fluidcoolersinglespeeds[0].design_entering_air_wetbulb_temperature, var_design_entering_air_wetbulb_temperature)
self.assertAlmostEqual(idf2.fluidcoolersinglespeeds[0].design_water_flow_rate, var_design_water_flow_rate)
self.assertAlmostEqual(idf2.fluidcoolersinglespeeds[0].design_air_flow_rate, var_design_air_flow_rate)
self.assertAlmostEqual(idf2.fluidcoolersinglespeeds[0].design_air_flow_rate_fan_power, var_design_air_flow_rate_fan_power)
self.assertEqual(idf2.fluidcoolersinglespeeds[0].outdoor_air_inlet_node_name, var_outdoor_air_inlet_node_name)
|
{
"content_hash": "c4b936b3661718b2a5e06316f8225c56",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 160,
"avg_line_length": 49.03488372093023,
"alnum_prop": 0.7061892340526441,
"repo_name": "rbuffat/pyidf",
"id": "f240a27b41108d7bb958b8be9383479f34d8e1f4",
"size": "4217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fluidcoolersinglespeed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22271673"
}
],
"symlink_target": ""
}
|
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-server-password'
server_password_policies = [
base.create_rule_default(
BASE_POLICY_NAME,
base.RULE_ADMIN_OR_OWNER,
"Show and clear the encrypted administrative password of a server",
[
{
'method': 'GET',
'path': '/servers/{server_id}/os-server-password'
},
{
'method': 'DELETE',
'path': '/servers/{server_id}/os-server-password'
}
]),
]
def list_rules():
return server_password_policies
|
{
"content_hash": "a15a9a953a7c2f176e4a9f773141f1d8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 23.96153846153846,
"alnum_prop": 0.5313001605136437,
"repo_name": "rajalokan/nova",
"id": "d2e12a522c1102a94449914c773c62a36c760034",
"size": "1262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/policies/server_password.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
}
|
from google.cloud import network_management_v1
def sample_rerun_connectivity_test():
# Create a client
client = network_management_v1.ReachabilityServiceClient()
# Initialize request argument(s)
request = network_management_v1.RerunConnectivityTestRequest(
name="name_value",
)
# Make the request
operation = client.rerun_connectivity_test(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END networkmanagement_v1_generated_ReachabilityService_RerunConnectivityTest_sync]
|
{
"content_hash": "e24b833f835af81392e668adc0f7fc58",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 85,
"avg_line_length": 26.82608695652174,
"alnum_prop": 0.7325769854132901,
"repo_name": "googleapis/python-network-management",
"id": "97d5a00d0b7873734ffa038884dc3cd649ea7487",
"size": "2045",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/networkmanagement_v1_generated_reachability_service_rerun_connectivity_test_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "349982"
},
{
"name": "Shell",
"bytes": "30696"
}
],
"symlink_target": ""
}
|
import rospy
from geometry_msgs.msg import Pose2D, Quaternion, Twist
from std_msgs.msg import String
from lg_msg_defs.msg import ApplicationState
from rospy import ROSException
from lg_sv import NearbyPanos
import requests
import json
from interactivespaces_msgs.msg import GenericMessage
# spacenav_node -> mux_twists -> /lg_twister/twist -> handle_spacenav_msg:
# 1. change pov based on rotational axes -> /streetview/pov
# 2. check for movement -> /streetview/panoid
# /streetview/location -> handle_location_msg:
# 1. query api, publish -> /streetview/panoid
# low priority
# /streetview/metadata -> handle_metadata_msg:
# 1. update self.metadata
X_THRESHOLD = 0.50
FORWARD_THRESHOLD = 0.2
BACKWARDS_THRESHOLD = 0.2
MOVEMENT_REPEAT_DELAY = 0.6
# TODO figure out some good values here
COEFFICIENT_LOW = 0.1
COEFFICIENT_HIGH = 3
ZOOM_MIN = 40
ZOOM_MAX = 40
INITIAL_ZOOM = 40
#IDLE_TIME_UNTIL_SNAP = 1.25
SNAP_DURATION = 15.0
def clamp(val, low, high):
return min(max(val, low), high)
def wrap(val, low, high):
if val > high:
val -= (high - low)
if val < low:
val += (high - low)
return val
def mean(series):
return sum(series) / float(len(series))
class StreetviewUtils:
@staticmethod
def get_metadata_from_lat_lon(lat, lon, radius=2000):
"""
Returns a panoid if one exists within $radius meters(?) of the lat/lon
"""
# this url may change someday...
url = 'http://maps.google.com/cbk?output=json&v=4&dm=0&pm=0&ll={},{}&radius={}'
r = requests.get(url.format(lat, lon, radius))
if r.status_code != 200:
return False
content = {}
try:
content = json.loads(r.content)
except ValueError:
return False
return content
@staticmethod
def get_panoid_from_lat_lon(lat, lon, radius=2000):
content = StreetviewUtils.get_metadata_from_lat_lon(lat, lon, radius)
try:
assert content['Location']
assert content['Location']['panoId']
except AssertionError:
return False
return str(content['Location']['panoId'])
@staticmethod
def translate_server_metadata_to_client_form(metadata):
"""
The metadata we get from the webapp client looks different from
the metadata we get directly from google, we should use the webapp
style as that is where our metadata will come from most of the time
This is a stripped down metadata with just the essiential bits of
information in it
"""
assert isinstance(metadata, dict)
links = []
ret = {}
try:
for link in metadata['Links']:
links.append(
{
'heading': float(link.get('yawDeg', 0)),
'pano': link.get('panoId', '')
}
)
ret = {
'links': links,
'location': {
'latLng': {
'lat': float(metadata.get('Location', {}).get('lat', 0)),
'lng': float(metadata.get('Location', {}).get('lng', 0))
},
'description': metadata.get('Location', {}).get('description', ''),
'attribution_name': metadata.get('Data', {}).get('attribution_name', ''),
'pano': metadata.get('Location', {}).get('panoId', '')
}
}
except KeyError:
return {}
return ret
class PanoViewerServer:
def __init__(self, location_pub, panoid_pub, pov_pub, tilt_min, tilt_max,
nav_sensitivity, space_nav_interval, idle_time_until_snap,
x_threshold=X_THRESHOLD, nearby_panos=NearbyPanos(),
metadata_pub=None, zoom_max=ZOOM_MAX, zoom_min=ZOOM_MIN,
tick_rate=180, director_pub=None, server_type=""):
self.location_pub = location_pub
self.panoid_pub = panoid_pub
self.pov_pub = pov_pub
self.server_type = server_type
self.director_pub = director_pub
self.nav_sensitivity = nav_sensitivity
self.tilt_max = tilt_max
self.tilt_min = tilt_min
self.space_nav_interval = space_nav_interval
self.idle_time_until_snap = idle_time_until_snap
self.nearby_panos = nearby_panos
self.x_threshold = x_threshold
self.metadata_pub = metadata_pub
self.zoom_max = zoom_max
self.zoom_min = zoom_min
self.tick_rate = tick_rate
self.gutter_val = 0.0005
self.tick_period = 1.0 / float(self.tick_rate)
self.state = True
self.initialize_variables()
self.start_timer()
def initialize_variables(self):
self.button_down = False
self.last_nav_msg_t = 0
self.last_nongutter_nav_msg_t = 0
self.time_since_last_nav_msg = 0
self.move_forward = 0
self.move_backward = 0
self.last_metadata = dict()
self.location = Pose2D()
self.pov = Quaternion()
self.pov.w = INITIAL_ZOOM # TODO is this alright?
self.panoid = str()
self.last_twist_msg = Twist()
self.tilt_method = self.tilt_snappy
def _twist_is_in_gutter(self, twist_msg):
return (
abs(twist_msg.linear.x) < self.gutter_val and
abs(twist_msg.linear.y) < self.gutter_val and
abs(twist_msg.linear.z) < self.gutter_val and
abs(twist_msg.angular.x) < self.gutter_val and
abs(twist_msg.angular.y) < self.gutter_val and
abs(twist_msg.angular.z) < self.gutter_val
)
def _tick(self, e):
if e.last_real is None:
return
dt = (e.current_real - e.last_real).to_sec()
npov = self.project_pov(self.last_twist_msg, dt)
try:
self.pub_pov(npov)
except ROSException as error:
rospy.logwarn("Could not publish pov during _tick: %s" % error)
def start_timer(self):
if not hasattr(self, 'tick_timer') or self.tick_timer is None:
self.tick_timer = rospy.Timer(
rospy.Duration.from_sec(self.tick_period),
self._tick
)
else:
rospy.logwarn('Tried to start_timer() a running PanoViewerServer')
def pub_location(self, pose2d):
"""
Publishes new location after setting the instance variable
"""
self.location = pose2d
self.location_pub.publish(pose2d)
def handle_location_msg(self, pose2d):
"""
Grabs the new position, and finds the corresponding panoid
then publishes the new panoid
"""
self.location = pose2d
panoid = StreetviewUtils.get_panoid_from_lat_lon(self.location.x, self.location.y)
if panoid:
self.pub_panoid(panoid)
def handle_metadata_msg(self, metadata):
"""
Grabs the new metadata from a publisher
"""
self.nearby_panos.handle_metadata_msg(metadata)
def handle_raw_metadata_msg(self, msg):
metadata = json.loads(msg.data)
metadata = StreetviewUtils.translate_server_metadata_to_client_form(metadata)
if self.metadata_pub:
self.metadata_pub.publish(String(json.dumps(metadata)))
def get_metadata(self):
"""
Get the metadata from nearby panos
"""
return self.nearby_panos.get_metadata()
def get_panoid(self, *args, **kwargs):
"""
Get the current panoid
"""
return self.panoid
def pub_pov(self, pov):
"""
Publishes the new pov after setting the instance variable
"""
self.pov = pov
self.pov_pub.publish(pov)
def handle_pov_msg(self, quaternion):
"""
Grabs the new pov from a publisher
"""
self.pov = quaternion
def pub_panoid(self, panoid, pov=None):
"""
Publishes a new panoid after setting the instance variable
"""
self.generate_director_message(panoid, pov)
if pov:
self.pub_pov(pov)
self.panoid = panoid
self.nearby_panos.set_panoid(self.panoid)
def tilt_snappy(self, twist_msg, coefficient):
now = rospy.get_time()
idle_t = now - self.last_nongutter_nav_msg_t
if idle_t < self.idle_time_until_snap:
return self.tilt_not_snappy(twist_msg, coefficient)
snap_t = idle_t - self.idle_time_until_snap
tilt = self.pov.x * max(1 - (snap_t / SNAP_DURATION), 0)
return tilt
def tilt_not_snappy(self, twist_msg, coefficient):
tilt = self.pov.x - coefficient * twist_msg.angular.y * self.nav_sensitivity
return tilt
def project_pov(self, twist_msg, dt):
coefficient = dt / self.tick_period / (1.0 / 60.0 / self.tick_period)
heading = self.pov.z - coefficient * twist_msg.angular.z * self.nav_sensitivity
tilt = self.tilt_method(twist_msg, coefficient)
#zoom = self.pov.w + coefficient * twist_msg.linear.z * self.nav_sensitivity
zoom = INITIAL_ZOOM
pov_msg = Quaternion(
x=clamp(tilt, self.tilt_min, self.tilt_max),
y=0,
z=wrap(heading, 0, 360),
w=clamp(zoom, self.zoom_min, self.zoom_max),
)
return pov_msg
def handle_panoid_msg(self, panoid):
"""
Grabs the new panoid from a publisher
"""
# Nothing to do here...
if self.panoid == panoid.data:
self.nearby_panos.set_panoid(self.panoid)
return
self.generate_director_message(panoid.data)
self.panoid = panoid.data
self.nearby_panos.set_panoid(self.panoid)
# now sets up director message so we can set the state of the system
def generate_director_message(self, panoid, pov=None):
if panoid == self.panoid:
return
server_type = self.server_type
msg = GenericMessage()
msg.type = 'json'
if pov:
heading = pov.z
tilt = pov.x
else:
heading = self.pov.z
tilt = self.pov.x
message = {
"slug": "auto_generated_sv_scene",
"windows": [
{
"activity": self.server_type,
"assets": [
panoid
],
"activity_config": {
"panoid": panoid,
"heading": heading,
"tilt": tilt
}
}
]
}
msg.message = json.dumps(message)
if self.director_pub:
self.director_pub.publish(msg)
def handle_state_msg(self, app_state):
"""
Set state to true if the application is visible
"""
self.state = (app_state.state == ApplicationState.VISIBLE)
def handle_spacenav_msg(self, twist):
"""
Adjust pov based on the twist message received, also handle
a possible change of pano
"""
# ignore spacenav messages when self.state != True
if not self.state:
return
now = rospy.get_time()
self.time_since_last_nav_msg = now - self.last_nav_msg_t
self.last_nav_msg_t = now
if self._twist_is_in_gutter(twist):
self.last_twist_msg = Twist()
else:
self.last_nongutter_nav_msg_t = now
self.last_twist_msg = twist
# check to see if the pano should be moved
self.handle_possible_pano_change(twist)
def handle_possible_pano_change(self, twist):
"""
Only moves if the linear x is > or < the x_threshold and that it has
been that way for atleast {backward,forward}_threshold publications
"""
if twist.linear.x > self.x_threshold:
if (self.move_forward == 0 or
self.time_since_last_nav_msg +
self.move_forward < FORWARD_THRESHOLD):
self.move_forward += self.time_since_last_nav_msg
if self.time_since_last_nav_msg + self.move_forward > FORWARD_THRESHOLD:
self._move_forward()
elif twist.linear.x < -self.x_threshold:
if (self.move_backward == 0 or
self.time_since_last_nav_msg +
self.move_backward < BACKWARDS_THRESHOLD):
self.move_backward += self.time_since_last_nav_msg
if self.time_since_last_nav_msg + self.move_backward > BACKWARDS_THRESHOLD:
self._move_backward()
else:
# reset counters
if self.move_forward < 0:
self.move_forward += MOVEMENT_REPEAT_DELAY / 10.0
if self.move_backward < 0:
self.move_backward += MOVEMENT_REPEAT_DELAY / 10.0
self.move_forward = min(self.move_forward, 0)
self.move_backward = min(self.move_backward, 0)
def handle_joy(self, joy):
"""
Move forward if the button is down, and wasn't previously down
"""
if not self.state:
return
if 1 not in joy.buttons:
self.button_down = False
return
if self.button_down:
# button is still down
return
self.button_down = True
if joy.buttons[-1] == 1:
self._move_forward()
else:
self._move_backward()
def _move_forward(self):
"""
Wrapper around move function, resets counter
"""
if self.move(self.pov.z):
self.move_forward = -MOVEMENT_REPEAT_DELAY
self.move_backward = -MOVEMENT_REPEAT_DELAY
def _move_backward(self):
"""
Wrapper around move function, resets counter and passes an adjusted
heading
"""
if self.move((self.pov.z + 180) % 360):
self.move_backward = -MOVEMENT_REPEAT_DELAY
self.move_forward = -MOVEMENT_REPEAT_DELAY
def move(self, heading):
"""
Moves to the closest pano in the direction of the heading
"""
move_to = self.nearby_panos.find_closest(self.panoid, heading)
if not move_to:
return None # don't update anything
self.pub_panoid(move_to)
return True
def getCoefficient(self):
"""
Find the ratio of time in between nav messages and
expected interval. Clamp the result and return.
"""
coefficient = self.time_since_last_nav_msg / self.space_nav_interval
coefficient = clamp(coefficient, COEFFICIENT_LOW, COEFFICIENT_HIGH)
return coefficient
def handle_soft_relaunch(self, *args, **kwargs):
"""
Reinitialize all variables
"""
rospy.logdebug('handling soft relaunch for streetview')
self.initialize_variables()
self.nearby_panos.handle_soft_relaunch()
def handle_tilt_snappy(self, msg):
if msg.data:
self.tilt_method = self.tilt_snappy
else:
self.tilt_method = self.tilt_not_snappy
|
{
"content_hash": "3a8918b1a0fff42c285ca79e9c88c208",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 93,
"avg_line_length": 33.60879120879121,
"alnum_prop": 0.5650667015432906,
"repo_name": "EndPointCorp/lg_ros_nodes",
"id": "25c8445c9dcb27f157be1a146352dd9fdddcb113",
"size": "15292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lg_sv/src/lg_sv/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28157"
},
{
"name": "C++",
"bytes": "291289"
},
{
"name": "CMake",
"bytes": "26675"
},
{
"name": "Dockerfile",
"bytes": "15931"
},
{
"name": "HTML",
"bytes": "29662"
},
{
"name": "JavaScript",
"bytes": "430737"
},
{
"name": "Makefile",
"bytes": "4197"
},
{
"name": "Python",
"bytes": "1144931"
},
{
"name": "Shell",
"bytes": "17851"
}
],
"symlink_target": ""
}
|
from Bio import SeqIO
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
from collections import OrderedDict
import os
import yaml
import pprint
import math
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import requests
class NewSeqToAlignment():
def __init__(self, path_to_master_file, path_to_master_file2=None):
self.path_to_master_file = path_to_master_file
self.aligned_seq_len = 0
self.aligned_seqs, self.seqs, self.master_uniprots = self.parse_input(path_to_master_file)
self.query_seqs = OrderedDict()
if path_to_master_file2:
self.aligned_seqs2, self.seqs2, self.master_uniprots2 = self.parse_input(path_to_master_file2)
# def run_add(self, path_to_file, path_to_query):
# # self.parse_input(path_to_file, path_to_query)
# for q_id, q_seq in self.query_seqs.items():
# ref_id, ref_score, ref_alignment = self.find_closest(q_id, q_seq)
# print(ref_id, ref_score, q_id)
# self.align_to_ref(ref_id, q_id, q_seq)
# break
def add_to_master(self, entries_to_add):
with open(self.path_to_master_file, 'a') as f:
for i, j in entries_to_add.items():
f.write('>{}\n{}\n'.format(i,j))
def run_edit(self, path_to_file, ref_id, cutoff=0.5):
list_to_edit = []
query_align, query_no_gaps = self.parse_input(path_to_file)
for key, seq in query_align.items():
if ref_id in key:
ref_key = key
ref_seq = seq
break
refseq_len = len(ref_seq.replace('-',''))
ref_gaps = len(ref_seq)-refseq_len
c=0
for i, j in query_align.items():
if len(j.replace('-',''))-ref_gaps<refseq_len*cutoff:
if '/' in i:
i = i.split('/')[0]
list_to_edit.append(i)
c+=1
return list_to_edit
def parse_input(self, path_to_file, check_seqlen=False):
aligned_seqs, seqs = OrderedDict(), OrderedDict()
for i, val in enumerate(SeqIO.parse(path_to_file, "fasta")):
if check_seqlen:
if i>0 and len(val.seq)!=self.aligned_seq_len:
print('Warning: aligned sequence length difference: {} and {}'.format(self.aligned_seq_len, len(val.seq)))
self.aligned_seq_len = len(val.seq)
aligned_seqs[val.id] = str(val.seq)
for i, j in aligned_seqs.items():
seqs[i] = j.replace('-','')
uniprots = self.get_uniprots(seqs)
return aligned_seqs, seqs, uniprots
def get_uniprots(self, seqs):
uniprot_list = {}
for i, j in seqs.items():
if '|' in i:
split1 = i.split('|')
if len(split1[0])<6 and len(split1[1])>=6:
uni_key = split1[1]
uniprot_list[uni_key] = i
elif '.' in i:
split2 = i.split('.')
if len(split2[0])>=6:
uni_key = split2[0]
uniprot_list[uni_key] = i
else:
uniprot_list[i] = i
if len(seqs)!=len(uniprot_list):
raise AssertionError('Error: Parsing issues with UniProt accessions in master file')
return uniprot_list
def find_closest(self, query_id, query_seq):
best_score = 0
best_id = None
best_alignment = None
for ref_id, ref_seq in self.seqs.items():
pw = pairwise2.align.localms(ref_seq, query_seq, 3, 1, -3, -.1)
score = pw[0][2]
if score>best_score:
best_score = score
best_id = ref_id
best_alignment = pw
return best_id, best_score, best_alignment
def remove_from_master(self, keys_to_remove):
out = OrderedDict()
for i, j in self.aligned_seqs.items():
if i not in keys_to_remove:
out[i] = j
return out
def write_to_file(self, out_file, dict):
with open(out_file, 'w') as f:
for i, j in dict.items():
f.write('>{}\n{}\n'.format(i,j))
def align_to_ref(self, ref_seq, query_seq, ident_score=4, sim_score=2, gap_open=-2, gap_ext=-.5, verbose=False):
pw = pairwise2.align.localms(ref_seq, query_seq, ident_score, sim_score, gap_open, gap_ext)
score = pw[0][2]
if verbose:
print(format_alignment(*pw[0]))
print(score)
print(self.aligned_seq_len, len(pw[0][1]))
return score
def check_content_in_master(self, file_to_compare):
new_set_with_gaps, new_set = self.parse_input(file_to_compare)
missing_from_master = OrderedDict()
c=0
for i, j in new_set.items():
orig_key = i
if '.' in i:
i = i.split('.')[0]
if i not in self.master_uniprots:
missing_from_master[orig_key] = j
c+=1
print('Found {} entries missing from master'.format(c))
return missing_from_master
def filter(self, input_f):
if type(input_f)==type('') and os.path.exists(input_f):
f_aligned, f_seqs, f_uniprots = self.parse_input(input_f)
else:
f_uniprots = input_f
print(f_uniprots)
o1, o2 = OrderedDict(), OrderedDict()
for i, j in f_uniprots.items():
if i in self.master_uniprots:
o1[i] = self.seqs[self.master_uniprots[i]]
elif i in self.master_uniprots2:
o2[i] = self.seqs2[self.master_uniprots2[i]]
return o1, o2
def ref_sim_matrix(self, refs=[], files=[], out_file=None):
sim_matrix = {'References': refs}
ref_seqs = []
parsed_files = []
for f in files:
if f==self.path_to_master_file:
for r in refs:
if r in self.master_uniprots:
if self.master_uniprots[r] in self.seqs:
ref_seqs.append(self.seqs[self.master_uniprots[r]])
parsed_files.append([self.aligned_seqs, self.seqs, self.master_uniprots])
else:
parsed_aligned, parsed_seqs, parsed_uniprots = self.parse_input(f)
for r in refs:
if r in parsed_uniprots:
if parsed_uniprots[r] in parsed_seqs:
ref_seqs.append(parsed_seqs[parsed_uniprots[r]])
parsed_files.append([parsed_aligned, parsed_seqs, parsed_uniprots])
if len(refs)==len(ref_seqs):
print('{} Reference sequences found'.format(len(ref_seqs)))
for f in parsed_files:
for key, seq in f[1].items():
sim_matrix[key] = []
for i, ref in enumerate(ref_seqs):
if refs[i] in f[2] and f[2][refs[i]]==key:
continue
score = self.align_to_ref(ref, seq)
sim_matrix[key].append(score)
mean = sum(sim_matrix[key])/len(sim_matrix[key])
variance = sum([((x - mean) ** 2) for x in sim_matrix[key]]) / len(sim_matrix[key])
res = variance ** 0.5
sim_matrix[key].append(mean)
sim_matrix[key].append(res)
if out_file:
with open(out_file, 'w') as f:
yaml.dump(sim_matrix, f, indent=4, default_flow_style=False)
# with open(out_file, 'r') as f:
# yf = yaml.load(f, Loader=yaml.FullLoader)
# pprint.pprint(yf)
# print(sim_matrix)
return sim_matrix
def sim_matrix_two_sets(self, out_file, one_file=False):
startTime = datetime.now()
print(startTime, 'Running...')
length = len(self.seqs)
progress_percentages = [1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90]
c=1
if one_file:
with open(out_file, 'w') as f:
for i, j in self.seqs.items():
if round(c/length*100) in progress_percentages:
print(datetime.now() - startTime, 'Progress: {}%'.format(round(c/length*100)))
if len(progress_percentages)>1:
progress_percentages = progress_percentages[1:]
else:
progress_percentages = []
for k, l in self.seqs2.items():
score = self.align_to_ref(j, l)
f.write('{},{},{}\n'.format(i, k, score))
c+=1
else:
for i, j in self.seqs.items():
if round(c/length*100) in progress_percentages:
print(datetime.now() - startTime, 'Progress: {}%'.format(round(c/length*100)))
if len(progress_percentages)>1:
progress_percentages = progress_percentages[1:]
else:
progress_percentages = []
with open('./classd/sim_matrix/{}_to_ste3.csv'.format(i), 'w') as f:
for k, l in self.seqs2.items():
score = self.align_to_ref(j, l)
f.write('{},{},{}\n'.format(i, k, score))
c+=1
def process_sim_matrix(self, sim_matrix, score_cutoff=None, sd_cutoff=None):
if os.path.exists(sim_matrix):
with open(sim_matrix, 'r') as f:
sim_matrix = yaml.load(f, Loader=yaml.FullLoader)
data = []
for i, j in sim_matrix.items():
if i=='References':
continue
else:
data.append([i, round(j[-2]), round(j[-1])]+j[:-2])
sorted_data = sorted(data, key=lambda x: (-x[1], x[2]))
filtered_data = OrderedDict()
for i in sorted_data:
# print(i[0],i[1],i[2],i[3])
if score_cutoff and sd_cutoff:
if i[1]>=score_cutoff and i[2]<=sd_cutoff:
try:
filtered_data[i[0]] = self.seqs[i[0]]
except:
filtered_data[i[0]] = self.seqs2[i[0]]
else:
try:
filtered_data[i[0]] = self.seqs[i[0]]
except:
filtered_data[i[0]] = self.seqs2[i[0]]
return filtered_data
def remove_duplicates(self, in_file):
out = {}
with open(in_file, 'r') as f:
lines = f.readlines()
for l in lines:
if l not in out:
out[l.replace('\n','')] = ''
return out
def check_on_uniprot(self, uniprots):
for i in uniprots:
x = requests.get('https://www.uniprot.org/uniprot/{}.xml'.format(i))
lines = x.text.split('\n')
try:
if 'Swiss' in lines[2]:
print(i, lines[2])
except:
print(i, 'Error')
###STE2
# nsta = NewSeqToAlignment('./classd/Uniprot_STE2_IPR000366_yeast.fasta')
### Add to master
# nsta.run_add('./classd/Focused_STE2.fa','./classd/STE2_to_add.fa')
### Remove based on occupancy cutoff
# list_to_edit = nsta.run_edit('./classd/ste2_occupancy.fasta', 'D6VTK4', cutoff=0.5)
# print('Removed {} entries'.format(len(list_to_edit)))
# print(list_to_edit)
# out = nsta.remove_from_master(list_to_edit)
# nsta.write_to_file('./classd/ste2_occupancy_50_cutoff.fasta', out)
###STE3
# nsta = NewSeqToAlignment('./classd/Uniprot_STE3_IPR001499_yeast.fasta')
### Compare and add to master
# missing = nsta.check_content_in_master('./classd/A0A5C3N0L3_blast.txt')
# nsta.add_to_master(missing)
### Remove based on occupany cutoff
# list_to_edit = nsta.run_edit('./classd/ste3_occupancy_all.fasta', '', cutoff=0.5)
# print('Removed {} entries'.format(len(list_to_edit)))
# print(list_to_edit)
# out = nsta.remove_from_master(list_to_edit)
# nsta.write_to_file('./classd/ste3_occupancy_50_cutoff.fasta', out)
### Combining
# nsta = NewSeqToAlignment('./classd/Uniprot_STE2_IPR000366_yeast.fasta','./classd/Uniprot_STE3_IPR001499_yeast.fasta')
# nsta.ref_sim_matrix(['D6VTK4','P06783'],['./classd/Uniprot_STE2_IPR000366_yeast.fasta','./classd/Uniprot_STE3_IPR001499_yeast.fasta'], './classd/sim_matrix.yaml')
# filtered = nsta.process_sim_matrix('./classd/sim_matrix.yaml', 1000, 50)
# nsta.write_to_file('./classd/filtered_1000_50.fasta',filtered)
# nsta.sim_matrix_two_sets('./classd/all_to_all.csv')
# o = nsta.filter('./classd/combined_smallest_full_tree.fasta')
# nsta.write_to_file('./classd/ste2_filtered.fasta', o[0])
# nsta.write_to_file('./classd/ste3_filtered.fasta', o[1])
### Combining filtered
# nsta = NewSeqToAlignment('./classd/ste2_filtered.fasta', './classd/ste3_filtered.fasta')
# nsta.sim_matrix_two_sets('./classd/all_to_all.csv', one_file=True)
# ste2 = nsta.remove_duplicates('./classd/ste2_filtered_v1.txt')
# ste3 = nsta.remove_duplicates('./classd/ste3_filtered_v1.txt')
# ste2_filter = nsta.filter(ste2)[0]
# ste3_filter = nsta.filter(ste3)[1]
# nsta.write_to_file('./classd/ste2+ste3_filtered.fasta', ste2_filter)
# nsta.write_to_file('./classd/ste2+ste3_filtered2.fasta', ste3_filter)
# nsta = NewSeqToAlignment('./classd/ste2+ste3_109and108_aligned_v2.fasta')
# nsta.write_to_file('./classd/ste2+ste3_109and108_v2.fasta', nsta.seqs)
### Check on UniProt
nsta = NewSeqToAlignment('./classd/ste2+ste3_109and108_v2.fasta')
nsta.check_on_uniprot(nsta.master_uniprots)
|
{
"content_hash": "e145c38742f826b32b55048b2512adfc",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 164,
"avg_line_length": 34.090090090090094,
"alnum_prop": 0.6513389711064129,
"repo_name": "protwis/protwis",
"id": "b3955f533f918f0eac988869e50d682940e00453",
"size": "11352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alignment/new_sequence_to_alignment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167612"
},
{
"name": "HTML",
"bytes": "2477269"
},
{
"name": "JavaScript",
"bytes": "3119217"
},
{
"name": "Promela",
"bytes": "467"
},
{
"name": "Python",
"bytes": "4289933"
}
],
"symlink_target": ""
}
|
import json
from werkzeug.wrappers import Response, Request as WRequest
class JSONResponse(Response):
def __init__(self, data, *args, **kwargs):
kwargs['content_type'] = 'application/json'
return super(JSONResponse, self).__init__(json.dumps(data), *args, **kwargs)
class Request(WRequest):
"""
Request with an extra .json() method.
"""
# This is copied from
# http://werkzeug.pocoo.org/docs/0.10/request_data/#how-to-extend-parsing,
# but adapted to have a method instead of an attribute, so parse errors
# don't get masked and show up as AttributeError.
# accept up to 4MB of transmitted data.
max_content_length = 1024 * 1024 * 4
def json(self):
if self.headers.get('content-type') == 'application/json':
return json.loads(self.data)
else:
from spa.exceptions import JSONBadRequest
raise JSONBadRequest('Expected Content-Type application/json, not %s'
% self.headers.get('content-type'))
|
{
"content_hash": "043ad9e99ce2cc8c59ce3f7236c5b3da",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 84,
"avg_line_length": 33.67741935483871,
"alnum_prop": 0.6369731800766284,
"repo_name": "dmonroy/spa",
"id": "cec4935d4c469c0aea46826cd736e98066bcb2fe",
"size": "1044",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spa/wrappers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1806"
},
{
"name": "HTML",
"bytes": "37683"
},
{
"name": "JavaScript",
"bytes": "62"
},
{
"name": "Python",
"bytes": "39425"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from django.contrib.gis.utils import LayerMapping
from boundaries.models import Country, country_ibge_mapping
class Command(BaseCommand):
help = 'Import village layer'
def add_arguments(self, parser):
parser.add_argument('shapefile_path', nargs='+', type=str)
def handle(self, *args, **options):
shapefile_path = options['shapefile_path'][0]
lm = LayerMapping(Country, shapefile_path, country_ibge_mapping,
transform=True, encoding='iso-8859-1')
lm.save(strict=True, verbose=True)
self.stdout.write('Camada de países importada com sucesso! Caminho do arquivo fornecido: "%s"' % shapefile_path)
|
{
"content_hash": "091c50bd0833753f387e06392b7bd403",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 120,
"avg_line_length": 36.55,
"alnum_prop": 0.6894664842681258,
"repo_name": "hacklabr/geodjango-boundaries",
"id": "f422d51f8410786343b20ab0b4cf7d5069d6d714",
"size": "732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boundaries/management/commands/import_countries_layer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1250"
},
{
"name": "Python",
"bytes": "14452"
}
],
"symlink_target": ""
}
|
import datetime
try:
import models
except:
from . import models
CART_ID = 'CART-ID'
class ItemAlreadyExists(Exception):
pass
class ItemDoesNotExist(Exception):
pass
class Cart:
def __init__(self, request):
cart_id = request.session.get(CART_ID)
if cart_id:
try:
cart = models.Cart.objects.get(id=cart_id, checked_out=False)
except models.Cart.DoesNotExist:
cart = self.new(request)
else:
cart = self.new(request)
self.cart = cart
def __iter__(self):
for item in self.cart.item_set.all():
yield item
def new(self, request):
cart = models.Cart(creation_date=datetime.datetime.now())
cart.save()
request.session[CART_ID] = cart.id
return cart
def add(self, product, unit_price, quantity=1):
try:
item = models.Item.objects.get(
cart=self.cart,
product=product,
)
except models.Item.DoesNotExist:
item = models.Item()
item.cart = self.cart
item.product = product
item.unit_price = unit_price
item.quantity = quantity
item.save()
else: #ItemAlreadyExists
item.unit_price = unit_price
item.quantity = item.quantity + int(quantity)
item.save()
def remove(self, product):
try:
item = models.Item.objects.get(
cart=self.cart,
product=product,
)
except models.Item.DoesNotExist:
raise ItemDoesNotExist
else:
item.delete()
def update(self, product, quantity, unit_price=None):
try:
item = models.Item.objects.get(
cart=self.cart,
product=product,
)
except models.Item.DoesNotExist:
raise ItemDoesNotExist
else: #ItemAlreadyExists
if quantity == 0:
item.delete()
else:
item.unit_price = unit_price
item.quantity = int(quantity)
item.save()
def count(self):
result = 0
for item in self.cart.item_set.all():
result += 1 * item.quantity
return result
def summary(self):
result = 0
for item in self.cart.item_set.all():
result += item.total_price
return result
def clear(self):
for item in self.cart.item_set.all():
item.delete()
|
{
"content_hash": "d50db234eadc18fe28e3d8a5a18e7d4b",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 77,
"avg_line_length": 26.835051546391753,
"alnum_prop": 0.52362658470995,
"repo_name": "thodoris/djangoPharma",
"id": "f59ae4fe73814276327ed4aa6fd233a076a0547e",
"size": "2603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoPharma/env/Lib/site-packages/cart/cart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90256"
},
{
"name": "HTML",
"bytes": "104080"
},
{
"name": "JavaScript",
"bytes": "7713"
},
{
"name": "Python",
"bytes": "68512"
}
],
"symlink_target": ""
}
|
"""Tests for adb."""
import cStringIO
import struct
import unittest
import adb_commands
import adb_protocol
import common_stub
BANNER = 'blazetest'
LOCAL_ID = 1
REMOTE_ID = 2
class BaseAdbTest(unittest.TestCase):
@classmethod
def _ExpectWrite(cls, usb, command, arg0, arg1, data):
usb.ExpectWrite(cls._MakeHeader(command, arg0, arg1, data))
usb.ExpectWrite(data)
if command == 'WRTE':
cls._ExpectRead(usb, 'OKAY', 0, 0)
@classmethod
def _ExpectRead(cls, usb, command, arg0, arg1, data=''):
usb.ExpectRead(cls._MakeHeader(command, arg0, arg1, data))
if data:
usb.ExpectRead(data)
if command == 'WRTE':
cls._ExpectWrite(usb, 'OKAY', LOCAL_ID, REMOTE_ID, '')
@classmethod
def _ConvertCommand(cls, command):
return sum(ord(c) << (i * 8) for i, c in enumerate(command))
@classmethod
def _MakeHeader(cls, command, arg0, arg1, data):
command = cls._ConvertCommand(command)
magic = command ^ 0xFFFFFFFF
checksum = adb_protocol.AdbMessage.CalculateChecksum(data)
return struct.pack('<6I', command, arg0, arg1, len(data), checksum, magic)
@classmethod
def _ExpectConnection(cls, usb):
cls._ExpectWrite(usb, 'CNXN', 0x01000000, 4096, 'host::%s\0' % BANNER)
cls._ExpectRead(usb, 'CNXN', 0, 0, 'device::\0')
@classmethod
def _ExpectOpen(cls, usb, service):
cls._ExpectWrite(usb, 'OPEN', LOCAL_ID, 0, service)
cls._ExpectRead(usb, 'OKAY', REMOTE_ID, LOCAL_ID)
@classmethod
def _ExpectClose(cls, usb):
cls._ExpectRead(usb, 'CLSE', REMOTE_ID, 0)
cls._ExpectWrite(usb, 'CLSE', LOCAL_ID, REMOTE_ID, '')
@classmethod
def _Connect(cls, usb):
return adb_commands.AdbCommands.Connect(usb, BANNER)
class AdbTest(BaseAdbTest):
@classmethod
def _ExpectCommand(cls, service, command, *responses):
usb = common_stub.StubUsb()
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, '%s:%s\0' % (service, command))
for response in responses:
cls._ExpectRead(usb, 'WRTE', REMOTE_ID, 0, response)
cls._ExpectClose(usb)
return usb
def testConnect(self):
usb = common_stub.StubUsb()
self._ExpectConnection(usb)
adb_commands.AdbCommands.Connect(usb, BANNER)
def testSmallResponseShell(self):
command = 'keepin it real'
response = 'word.'
usb = self._ExpectCommand('shell', command, response)
adb_commands = self._Connect(usb)
self.assertEqual(response, adb_commands.Shell(command))
def testBigResponseShell(self):
command = 'keepin it real big'
# The data doesn't have to be big, the point is that it just concatenates
# the data from different WRTEs together.
responses = ['other stuff, ', 'and some words.']
usb = self._ExpectCommand('shell', command, *responses)
adb_commands = self._Connect(usb)
self.assertEqual(''.join(responses), adb_commands.Shell(command))
def testReboot(self):
usb = self._ExpectCommand('reboot', '', '')
adb_commands = self._Connect(usb)
adb_commands.Reboot()
def testRebootBootloader(self):
usb = self._ExpectCommand('reboot', 'bootloader', '')
adb_commands = self._Connect(usb)
adb_commands.RebootBootloader()
def testRemount(self):
usb = self._ExpectCommand('remount', '', '')
adb_commands = self._Connect(usb)
adb_commands.Remount()
def testRoot(self):
usb = self._ExpectCommand('root', '', '')
adb_commands = self._Connect(usb)
adb_commands.Root()
class FilesyncAdbTest(BaseAdbTest):
@classmethod
def _MakeSyncHeader(cls, command, *int_parts):
command = cls._ConvertCommand(command)
return struct.pack('<%dI' % (len(int_parts) + 1), command, *int_parts)
@classmethod
def _MakeWriteSyncPacket(cls, command, data='', size=None):
return cls._MakeSyncHeader(command, size or len(data)) + data
@classmethod
def _ExpectSyncCommand(cls, write_commands, read_commands):
usb = common_stub.StubUsb()
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, 'sync:\0')
while write_commands or read_commands:
if write_commands:
command = write_commands.pop(0)
cls._ExpectWrite(usb, 'WRTE', LOCAL_ID, REMOTE_ID, command)
if read_commands:
command = read_commands.pop(0)
cls._ExpectRead(usb, 'WRTE', REMOTE_ID, LOCAL_ID, command)
cls._ExpectClose(usb)
return usb
def testPush(self):
filedata = 'alo there, govnah'
mtime = 100
send = [
self._MakeWriteSyncPacket('SEND', '/data,33272'),
self._MakeWriteSyncPacket('DATA', filedata),
self._MakeWriteSyncPacket('DONE', size=mtime),
]
data = 'OKAY\0\0\0\0'
usb = self._ExpectSyncCommand([''.join(send)], [data])
adb_commands = self._Connect(usb)
adb_commands.Push(cStringIO.StringIO(filedata), '/data', mtime=mtime)
def testPull(self):
filedata = "g'ddayta, govnah"
recv = self._MakeWriteSyncPacket('RECV', '/data')
data = [
self._MakeWriteSyncPacket('DATA', filedata),
self._MakeWriteSyncPacket('DONE'),
]
usb = self._ExpectSyncCommand([recv], [''.join(data)])
adb_commands = self._Connect(usb)
self.assertEqual(filedata, adb_commands.Pull('/data'))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "7cfe103245e8e6f9c3460d913d40db10",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 78,
"avg_line_length": 28.87292817679558,
"alnum_prop": 0.6593953310371221,
"repo_name": "8l/connectal",
"id": "758b6ea1ebddc6fd5ee931aaaf38e6a3dbd79224",
"size": "5822",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/adb/adb_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Bluespec",
"bytes": "2019607"
},
{
"name": "C",
"bytes": "289795"
},
{
"name": "C++",
"bytes": "638869"
},
{
"name": "Cuda",
"bytes": "5931"
},
{
"name": "Makefile",
"bytes": "75664"
},
{
"name": "Matlab",
"bytes": "478"
},
{
"name": "Objective-C++",
"bytes": "2303"
},
{
"name": "Protocol Buffer",
"bytes": "596"
},
{
"name": "Python",
"bytes": "558681"
},
{
"name": "QMake",
"bytes": "115"
},
{
"name": "Shell",
"bytes": "22281"
},
{
"name": "SystemVerilog",
"bytes": "9142"
},
{
"name": "Tcl",
"bytes": "90784"
},
{
"name": "Verilog",
"bytes": "100214"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import subprocess
import os
class CommandWrapper(object):
"""
Parameters
----------
command : str
Command (e.g. ls, mkdir or ffmpeg)
exists_params : iterable, optional
Parameters passed to command when checking its existence.
This parameters was introduced to deal with commands that
do real things even when no parameters are given (e.g. vobcopy needs
parameter -h to avoid copying DVD)
"""
def __init__(self, command, exists_params=None):
super(CommandWrapper, self).__init__()
# make sure exists_params is a (potentially empty) list
if exists_params is None:
exists_params = []
exists_params = list(exists_params)
if self.command_exists(command, exists_params):
self.command = command
else:
message = 'Could not find command "{command}"'
raise ValueError(message.format(command=command))
@classmethod
def command_exists(cls, command, exists_params):
"""Check whether `command` exists
>>> CommandWrapper.command_exists('ls')
True
>>> CommandWrapper.command_exists('sl')
False
>>> CommandWrapper.command_exists('/bin/ls')
True
"""
try:
with open(os.devnull, mode='w') as _:
subprocess.Popen(
[command] + exists_params,
stdout=_, stderr=_
).communicate()
except OSError as e:
if e.errno == os.errno.ENOENT:
return False
return True
def run_command(self, options=None, env=None):
"""
Parameters
----------
options : iterable, optional
env : dict, optional
"""
if options is None:
options = []
else:
options = list(options)
cmd = [self.command] + options
try:
with open(os.devnull, mode='w') as _:
subprocess.check_call(cmd, stderr=_, stdout=_, env=env)
except Exception as e:
raise e # TODO: better handling
def get_output(self, options=None, env=None):
"""
Parameters
----------
options : iterable, optional
env : dict, optional
"""
if options is None:
options = []
else:
options = list(options)
cmd = [self.command] + options
try:
with open(os.devnull, mode='w') as _:
stdout = subprocess.check_output(cmd, stderr=_, env=env)
except Exception as e:
raise e # TODO: better handling
return stdout
|
{
"content_hash": "94fa64459025cb7f8d597de0392f6ad2",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 76,
"avg_line_length": 26.718446601941746,
"alnum_prop": 0.5363372093023255,
"repo_name": "tvd-dataset/tvd",
"id": "acc1eea6769407d0ad8e2dfe1c8c9c0383792f2f",
"size": "3965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tvd/rip/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133146"
}
],
"symlink_target": ""
}
|
import collections
import mock
from oslo_utils import timeutils
from oslo_versionedobjects import fixture
from nova import exception
from nova.network import model as network_model
from nova.notifications import base as notification_base
from nova.notifications.objects import base as notification
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import test
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel as uuids
class TestNotificationBase(test.NoDBTestCase):
@base.NovaObjectRegistry.register_if(False)
class TestObject(base.NovaObject):
VERSION = '1.0'
fields = {
'field_1': fields.StringField(),
'field_2': fields.IntegerField(),
'not_important_field': fields.IntegerField(),
'lazy_field': fields.IntegerField()
}
def obj_load_attr(self, attrname):
if attrname == 'lazy_field':
self.lazy_field = 42
else:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
@base.NovaObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
'lazy_field': ('source_field', 'lazy_field')
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
# filled by the schema
'field_1': fields.StringField(nullable=True),
'field_2': fields.IntegerField(), # filled by the schema
'lazy_field': fields.IntegerField() # filled by the schema
}
def populate_schema(self, source_field):
super(TestNotificationBase.TestNotificationPayload,
self).populate_schema(source_field=source_field)
@base.NovaObjectRegistry.register_if(False)
class TestNotificationPayloadEmptySchema(
notification.NotificationPayloadBase):
VERSION = '1.0'
fields = {
'extra_field': fields.StringField(), # filled by ctor
}
@notification.notification_sample('test-update-1.json')
@notification.notification_sample('test-update-2.json')
@base.NovaObjectRegistry.register_if(False)
class TestNotification(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayload')
}
@base.NovaObjectRegistry.register_if(False)
class TestNotificationEmptySchema(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
}
fake_service = {
'created_at': timeutils.utcnow().replace(microsecond=0),
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'uuid': uuids.service,
'host': 'fake-host',
'binary': 'nova-fake',
'topic': 'fake-service-topic',
'report_count': 1,
'forced_down': False,
'disabled': False,
'disabled_reason': None,
'last_seen_up': None,
'version': 1}
expected_payload = {
'nova_object.name': 'TestNotificationPayload',
'nova_object.data': {
'extra_field': 'test string',
'field_1': 'test1',
'field_2': 42,
'lazy_field': 42},
'nova_object.version': '1.0',
'nova_object.namespace': 'nova'}
def setUp(self):
super(TestNotificationBase, self).setUp()
with mock.patch('nova.db.service_update') as mock_db_service_update:
self.service_obj = objects.Service(context=mock.sentinel.context,
id=self.fake_service['id'])
self.service_obj.obj_reset_changes(['version'])
mock_db_service_update.return_value = self.fake_service
self.service_obj.save()
self.my_obj = self.TestObject(field_1='test1',
field_2=42,
not_important_field=13)
self.payload = self.TestNotificationPayload(
extra_field='test string')
self.payload.populate_schema(source_field=self.my_obj)
self.notification = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE,
phase=fields.NotificationPhase.START),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
def _verify_notification(self, mock_notifier, mock_context,
expected_event_type,
expected_payload):
mock_notifier.prepare.assert_called_once_with(
publisher_id='nova-fake:fake-host')
mock_notify = mock_notifier.prepare.return_value.info
self.assertTrue(mock_notify.called)
self.assertEqual(mock_notify.call_args[0][0], mock_context)
self.assertEqual(mock_notify.call_args[1]['event_type'],
expected_event_type)
actual_payload = mock_notify.call_args[1]['payload']
self.assertJsonEqual(expected_payload, actual_payload)
@mock.patch('nova.rpc.LEGACY_NOTIFIER')
@mock.patch('nova.rpc.NOTIFIER')
def test_emit_notification(self, mock_notifier, mock_legacy):
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
self.notification.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update.start',
expected_payload=self.expected_payload)
self.assertFalse(mock_legacy.called)
@mock.patch('nova.rpc.NOTIFIER')
def test_emit_with_host_and_binary_as_publisher(self, mock_notifier):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(host='fake-host',
binary='nova-fake'),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
@mock.patch('nova.rpc.LEGACY_NOTIFIER')
@mock.patch('nova.rpc.NOTIFIER')
def test_emit_event_type_without_phase(self, mock_notifier, mock_legacy):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
self.assertFalse(mock_legacy.called)
@mock.patch('nova.rpc.NOTIFIER')
def test_not_possible_to_emit_if_not_populated(self, mock_notifier):
non_populated_payload = self.TestNotificationPayload(
extra_field='test string')
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=non_populated_payload)
mock_context = mock.Mock()
self.assertRaises(AssertionError, noti.emit, mock_context)
self.assertFalse(mock_notifier.called)
def test_lazy_load_source_field(self):
my_obj = self.TestObject(field_1='test1',
field_2=42,
not_important_field=13)
payload = self.TestNotificationPayload(extra_field='test string')
payload.populate_schema(my_obj)
self.assertEqual(42, payload.lazy_field)
def test_uninited_source_field_defaulted_to_none(self):
my_obj = self.TestObject(field_2=42,
not_important_field=13)
payload = self.TestNotificationPayload(extra_field='test string')
payload.populate_schema(my_obj)
self.assertIsNone(payload.field_1)
def test_uninited_source_field_not_nullable_payload_field_fails(self):
my_obj = self.TestObject(field_1='test1',
not_important_field=13)
payload = self.TestNotificationPayload(extra_field='test string')
self.assertRaises(ValueError, payload.populate_schema, my_obj)
@mock.patch('nova.rpc.NOTIFIER')
def test_empty_schema(self, mock_notifier):
non_populated_payload = self.TestNotificationPayloadEmptySchema(
extra_field='test string')
noti = self.TestNotificationEmptySchema(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=non_populated_payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=
{'nova_object.name': 'TestNotificationPayloadEmptySchema',
'nova_object.data': {'extra_field': u'test string'},
'nova_object.version': '1.0',
'nova_object.namespace': 'nova'})
def test_sample_decorator(self):
self.assertEqual(2, len(self.TestNotification.samples))
self.assertIn('test-update-1.json', self.TestNotification.samples)
self.assertIn('test-update-2.json', self.TestNotification.samples)
@mock.patch('nova.notifications.objects.base.NotificationBase._emit')
@mock.patch('nova.rpc.NOTIFIER')
def test_payload_is_not_generated_if_notifier_is_not_enabled(
self, mock_notifier, mock_emit):
mock_notifier.is_enabled.return_value = False
payload = self.TestNotificationPayload(
extra_field='test string')
self.payload.populate_schema(source_field=self.my_obj)
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=payload)
mock_context = mock.Mock()
noti.emit(mock_context)
self.assertFalse(payload.populated)
self.assertFalse(mock_emit.called)
@mock.patch('nova.notifications.objects.base.NotificationBase._emit')
def test_payload_is_not_generated_if_notification_format_is_unversioned(
self, mock_emit):
self.flags(notification_format='unversioned', group='notifications')
payload = self.TestNotificationPayload(
extra_field='test string')
self.payload.populate_schema(source_field=self.my_obj)
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=payload)
mock_context = mock.Mock()
noti.emit(mock_context)
self.assertFalse(payload.populated)
self.assertFalse(mock_emit.called)
notification_object_data = {
'AggregateNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'AggregatePayload': '1.1-1eb9adcc4440d8627de6ec37c6398746',
'AuditPeriodPayload': '1.0-2b429dd307b8374636703b843fa3f9cb',
'BandwidthPayload': '1.0-ee2616a7690ab78406842a2b68e34130',
'EventType': '1.5-ffa6d332f4462c45a2a363356a14165f',
'ExceptionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'ExceptionPayload': '1.0-27db46ee34cd97e39f2643ed92ad0cc5',
'FlavorNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'FlavorPayload': '1.3-6335e626893d7df5f96f87e6731fef56',
'InstanceActionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionPayload': '1.2-b7b2481bcd0e1edcc1970ef7150df5aa',
'InstanceActionVolumeNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionVolumePayload': '1.0-20c0dca4cfaf1a68d3e8c45e5aca3907',
'InstanceActionVolumeSwapNotification':
'1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionVolumeSwapPayload': '1.2-d7925b763e0795f8e5c1aa0e95bd67bd',
'InstancePayload': '1.2-a1988f6fe728bd4b478353a85c48ad55',
'InstanceStateUpdatePayload': '1.0-07e111c0fa0f6db0f79b0726d593e3da',
'InstanceUpdateNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceUpdatePayload': '1.3-5bf5f18ed1232b1d8884fa784b77728f',
'IpPayload': '1.0-8ecf567a99e516d4af094439a7632d34',
'NotificationPublisher': '1.0-bbbc1402fb0e443a3eb227cc52b61545',
'ServiceStatusNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'ServiceStatusPayload': '1.1-7b6856bd879db7f3ecbcd0ca9f35f92f',
}
class TestNotificationObjectVersions(test.NoDBTestCase):
def setUp(self):
super(TestNotificationObjectVersions, self).setUp()
base.NovaObjectRegistry.register_notification_objects()
def test_versions(self):
checker = fixture.ObjectVersionChecker(
test_objects.get_nova_objects())
notification_object_data.update(test_objects.object_data)
expected, actual = checker.test_hashes(notification_object_data,
extra_data_func=get_extra_data)
self.assertEqual(expected, actual,
'Some notification objects have changed; please make '
'sure the versions have been bumped, and then update '
'their hashes here.')
def test_notification_payload_version_depends_on_the_schema(self):
@base.NovaObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
'field_1': fields.StringField(), # filled by the schema
'field_2': fields.IntegerField(), # filled by the schema
}
checker = fixture.ObjectVersionChecker(
{'TestNotificationPayload': (TestNotificationPayload,)})
old_hash = checker.get_hashes(extra_data_func=get_extra_data)
TestNotificationPayload.SCHEMA['field_3'] = ('source_field',
'field_3')
new_hash = checker.get_hashes(extra_data_func=get_extra_data)
self.assertNotEqual(old_hash, new_hash)
def get_extra_data(obj_class):
extra_data = tuple()
# Get the SCHEMA items to add to the fingerprint
# if we are looking at a notification
if issubclass(obj_class, notification.NotificationPayloadBase):
schema_data = collections.OrderedDict(
sorted(obj_class.SCHEMA.items()))
extra_data += (schema_data,)
return extra_data
class TestInstanceNotification(test.NoDBTestCase):
def setUp(self):
super(TestInstanceNotification, self).setUp()
self.test_keys = ['memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
'swap']
self.flavor_values = {k: 123 for k in self.test_keys}
instance_values = {k: 456 for k in self.test_keys}
flavor = objects.Flavor(flavorid='test-flavor', name='test-flavor',
disabled=False, projects=[], is_public=True,
extra_specs={}, **self.flavor_values)
info_cache = objects.InstanceInfoCache(
network_info=network_model.NetworkInfo())
self.instance = objects.Instance(
flavor=flavor,
info_cache=info_cache,
metadata={},
uuid=uuids.instance1,
locked=False,
auto_disk_config=False,
**instance_values)
self.payload = {
'bandwidth': {},
'audit_period_ending': timeutils.utcnow(),
'audit_period_beginning': timeutils.utcnow(),
}
@mock.patch('nova.notifications.objects.instance.'
'InstanceUpdateNotification._emit')
def test_send_version_instance_update_uses_flavor(self, mock_emit):
# instance.update notification needs some tags value to avoid lazy-load
self.instance.tags = objects.TagList()
# Make sure that the notification payload chooses the values in
# instance.flavor.$value instead of instance.$value
notification_base._send_versioned_instance_update(
mock.MagicMock(),
self.instance,
self.payload,
'host',
'compute')
payload = mock_emit.call_args_list[0][1]['payload']['nova_object.data']
flavor_payload = payload['flavor']['nova_object.data']
data = {k: flavor_payload[k] for k in self.test_keys}
self.assertEqual(self.flavor_values, data)
@mock.patch('nova.rpc.NOTIFIER')
@mock.patch('nova.notifications.objects.instance.'
'InstanceUpdatePayload.__init__', return_value=None)
@mock.patch('nova.notifications.objects.instance.'
'InstanceUpdateNotification.__init__', return_value=None)
def test_send_versioned_instance_notification_is_not_called_disabled(
self, mock_notification, mock_payload, mock_notifier):
mock_notifier.is_enabled.return_value = False
notification_base._send_versioned_instance_update(
mock.MagicMock(),
self.instance,
self.payload,
'host',
'compute')
self.assertFalse(mock_payload.called)
self.assertFalse(mock_notification.called)
@mock.patch('nova.notifications.objects.instance.'
'InstanceUpdatePayload.__init__', return_value=None)
@mock.patch('nova.notifications.objects.instance.'
'InstanceUpdateNotification.__init__', return_value=None)
def test_send_versioned_instance_notification_is_not_called_unversioned(
self, mock_notification, mock_payload):
self.flags(notification_format='unversioned', group='notifications')
notification_base._send_versioned_instance_update(
mock.MagicMock(),
self.instance,
self.payload,
'host',
'compute')
self.assertFalse(mock_payload.called)
self.assertFalse(mock_notification.called)
|
{
"content_hash": "09a51cc0dd4e1644ef14c529c8416136",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 79,
"avg_line_length": 40.618,
"alnum_prop": 0.6253385198680388,
"repo_name": "rajalokan/nova",
"id": "d0e815f5e83ec796701ce3493999c41678965a20",
"size": "20906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/notifications/objects/test_notification.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
}
|
import json
import keys
def api_msg_render(headers, body, method, api_url):
#
# If rendering API message is enabled this takes a collection of variables and builds a dict containing API usage info.
# input:
# headers - dict of headers
# body - dict for body; supports --form, -F, group, user_policy, video, or None
# method - HTTP Request Method or None
# api_url - URL for API call
# output:
# APi message dict: {"headers":headers,
# "body":body,
# "method":method,
# "api_url":api_url,
# "curl":curl}
# or None
#
if keys.render_API:
# Init cURL command
output_curl = "curl "
# Build Headers
for h in headers.keys():
output_curl += "-H \'" + h + ": " + headers[h]+"\' "
# Append Body
if body:
if body.has_key("--form"):
output_curl += " --form \'" + body["--form"] +"\' "
if body.has_key("-F"):
output_curl += " -F \"" + body["-F"] +"\" "
if body.has_key("group"):
# start formatting
output_curl += " -d \'{\"group\": {"
# render group objects
body_data = body["group"]
# NOTE: do this MANUALLY as -F and --form order matters in this corner case
for x in body_data.keys():
output_curl += "\"" + x + "\":"
# if int skip ""
if isinstance(body_data[x], (int, long)):
output_curl += str(body_data[x]) +","
else:
output_curl += "\""+str(body_data[x]) +"\","
# trim extra ","
if output_curl.endswith(','):
output_curl = output_curl[:-1]
output_curl += "}}\' "
if body.has_key("user_policy"):
# start formatting
output_curl += " -d \'{\"user_policy\": {"
body_data = body["user_policy"]
output_curl += api_body_render(body_data)
output_curl += "}}\' "
if body.has_key("video"):
# start formatting
output_curl += " -d \'{\"video\": {"
body_data = body["video"]
output_curl += api_body_render(body_data)
output_curl += "}}\' "
# Append Request
if method:
if method.lower() != "get":
output_curl += "-X "+method+" "
# Append API URL
output_curl += "-i " + api_url
# Build output dict
final_output = {
"headers":headers,
"body":body,
"method":method,
"api_url":api_url,
"curl":output_curl
}
return final_output
else:
return None
def api_body_render(api_data):
#
# Takes a generic API body JSON message and converts it into a usable cURL string
# input:
# api_data - Generic JSON formatted API Body value
# output:
# cURL Body Valur (str)
#
output_data = ""
if api_data:
for x in api_data.keys():
output_data += json.dumps(x) + ": " + json.dumps(api_data[x])+","
if output_data.endswith(','):
output_data = output_data[:-1]
return output_data
def api_parse_csv(base_csv):
#
# Takes a comma separated string of integers, parses them, and returns a list of integers, an empty list if no data, or None on error
# input:
# base_csv - a string of comma separated integers
# output:
# output_data - list of integers, and empty list if no data provided, or None on error
#
base_list = str(base_csv).split(',')
output_list = []
for x in base_list:
try:
output_list.append(int(x))
except:
# Value not integer; about CSV parse
return None
return output_list
|
{
"content_hash": "566857cf39f3706b72433c24b7ddd425",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 134,
"avg_line_length": 27.74137931034483,
"alnum_prop": 0.5997513983840895,
"repo_name": "ReelDx/medvidio-webapp-demo",
"id": "f4b6dfced4281b1549f2a95e4361a4eb0bda9786",
"size": "3218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/api_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12255"
},
{
"name": "Python",
"bytes": "69555"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from pos.models.shift import Shift
from pos.models.stock import Category, Discount, FoodLog, Ingredient, Item, ItemIngredient, Order, OrderLine
from pos.models.sumup import SumUpAPIKey, SumUpCard, SumUpOnline, SumUpTerminal, SumUpTransaction
from pos.models.user import CreditUpdate, User, GeekeventsToken
from pos.models.printer import Printer
class CreditUpdateAdmin(admin.ModelAdmin):
readonly_fields = ('timestamp', 'amount', 'user', 'updated_by_user', 'geekevents_id')
pass
class DiscountAdmin(admin.ModelAdmin):
pass
class ItemIngredientAdmin(admin.ModelAdmin):
pass
class GeekeventsTokenInline(admin.StackedInline):
readonly_fields = ('ge_user_id', 'timestamp', 'token')
model = GeekeventsToken
extra = 0
def __unicode__(self):
return self.token
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request, obj=None):
return False
class UserAdmin(admin.ModelAdmin):
search_fields = ('card', 'first_name', 'last_name',)
list_display = ('full_name', 'credit',)
inlines = [GeekeventsTokenInline]
def get_inline_instances(self, request, obj=None):
if not obj or not hasattr(obj, 'geekeventstoken'):
return []
return super(UserAdmin, self).get_inline_instances(request, obj)
def full_name(self, obj):
if hasattr(obj, 'geekeventstoken'):
return '{} {} via Geekevents SSO'.format(obj.first_name, obj.last_name)
return '{} {}'.format(obj.first_name, obj.last_name)
pass
class IngredientAdmin(admin.ModelAdmin):
pass
class ItemAdmin(admin.ModelAdmin):
pass
class OrderLineAdmin(admin.ModelAdmin):
readonly_fields = ('ingredients', 'item', 'price')
list_display = ('item', 'order', 'state')
class OrderLineInline(admin.TabularInline):
readonly_fields = ('item', 'ingredients', 'price', 'message')
model = OrderLine
extra = 0
def __unicode__(self):
return ''
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request, obj=None):
return False
class CategoryAdmin(admin.ModelAdmin):
pass
class ShiftAdmin(admin.ModelAdmin):
pass
class SumUpAPIKeyAdmin(admin.ModelAdmin):
pass
class SumUpOnlineAdmin(admin.ModelAdmin):
readonly_fields = ('id', 'created', 'timestamp', 'transaction_id', 'transaction_comment')
ordering = ('-created',)
list_display = ('user', 'amount', 'status', 'created', 'transaction_id', 'transaction_comment',)
pass
class SumUpTerminalAdmin(admin.ModelAdmin):
pass
class SumUpTransactionAdmin(admin.ModelAdmin):
pass
class SumUpCardAdmin(admin.ModelAdmin):
readonly_fields = ('id', 'created', 'timestamp')
ordering = ('-created',)
list_display = ('user', 'amount', 'status', 'created', 'transaction_id', 'transaction_comment',)
pass
class FoodLogAdmin(admin.ModelAdmin):
readonly_fields = ('orderline', 'state', 'timestamp')
list_display = ('orderline_id', 'orderline', 'timestamp', 'state')
pass
class FoodLogInline(admin.TabularInline):
readonly_fields = ('orderline', 'state', 'timestamp')
model = FoodLog
extra = 0
def __unicode__(self):
return ''
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request, obj=None):
return False
class OrderAdmin(admin.ModelAdmin):
readonly_fields = ('user', 'payment_method', 'cashier', 'authenticated_user')
list_display = ('id', 'user', 'date', 'sum', 'state')
inlines = [OrderLineInline]
class PrinterAdmin(admin.ModelAdmin):
pass
admin.site.register(User, UserAdmin)
admin.site.register(Ingredient, IngredientAdmin)
admin.site.register(Item, ItemAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderLine, OrderLineAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Shift, ShiftAdmin)
admin.site.register(ItemIngredient, ItemIngredientAdmin)
admin.site.register(Discount, DiscountAdmin)
admin.site.register(CreditUpdate, CreditUpdateAdmin)
admin.site.register(SumUpAPIKey, SumUpAPIKeyAdmin)
admin.site.register(SumUpTerminal, SumUpTerminalAdmin)
admin.site.register(SumUpTransaction, SumUpTransactionAdmin)
admin.site.register(SumUpCard, SumUpCardAdmin)
admin.site.register(SumUpOnline, SumUpOnlineAdmin)
admin.site.register(FoodLog, FoodLogAdmin)
admin.site.register(Printer, PrinterAdmin)
|
{
"content_hash": "4f02cc86a592d9d1a253e5ef9829edc8",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 108,
"avg_line_length": 27.349397590361445,
"alnum_prop": 0.7068281938325991,
"repo_name": "nuxis/p0sX-server",
"id": "416a56b1d46479066a7a621b3a5a838e755dee98",
"size": "4540",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "p0sx/pos/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "351"
},
{
"name": "Makefile",
"bytes": "663"
},
{
"name": "Python",
"bytes": "126660"
},
{
"name": "Shell",
"bytes": "955"
},
{
"name": "Standard ML",
"bytes": "1597"
}
],
"symlink_target": ""
}
|
def average(array):
pass
# your code goes here
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
result = average(arr)
print(result)
|
{
"content_hash": "1633a4dd72891f63d7d32dd2f87387f5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 41,
"avg_line_length": 19.1,
"alnum_prop": 0.5445026178010471,
"repo_name": "jerodg/hackerrank-python",
"id": "3f99c14eaff0c523c952a2c1418db293a137b01b",
"size": "191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/03.Sets/01.Introduction/template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39802"
}
],
"symlink_target": ""
}
|
"""
=========================================
Tractography Clustering with QuickBundles
=========================================
This example explains how we can use QuickBundles [Garyfallidis12]_ to
simplify/cluster streamlines.
First import the necessary modules.
"""
import numpy as np
from nibabel import trackvis as tv
from dipy.segment.clustering import QuickBundles
from dipy.io.pickles import save_pickle
from dipy.data import get_data
from dipy.viz import fvtk
"""
For educational purposes we will try to cluster a small streamline bundle known
from neuroanatomy as the fornix.
"""
fname = get_data('fornix')
"""
Load fornix streamlines.
"""
streams, hdr = tv.read(fname)
streamlines = [i[0] for i in streams]
"""
Perform QuickBundles clustering using the MDF metric and a 10mm distance
threshold. Keep in mind that since the MDF metric requires streamlines to have
the same number of points, the clustering algorithm will internally use a
representation of streamlines that have been automatically downsampled/upsampled
so they have only 12 points (To set manually the number of points,
see :ref:`clustering-examples-ResampleFeature`).
"""
qb = QuickBundles(threshold=10.)
clusters = qb.cluster(streamlines)
"""
`clusters` is a `ClusterMap` object which contains attributes that
provide information about the clustering result.
"""
print("Nb. clusters:", len(clusters))
print("Cluster sizes:", map(len, clusters))
print("Small clusters:", clusters < 10)
print("Streamlines indices of the first cluster:\n", clusters[0].indices)
print("Centroid of the last cluster:\n", clusters[-1].centroid)
"""
::
Nb. clusters: 4
Cluster sizes: [64, 191, 47, 1]
Small clusters: array([False, False, False, True], dtype=bool)
Streamlines indices of the first cluster:
[0, 7, 8, 10, 11, 12, 13, 14, 15, 18, 26, 30, 33, 35, 41, 65, 66, 85, 100,
101, 105, 115, 116, 119, 122, 123, 124, 125, 126, 128, 129, 135, 139, 142,
143, 144, 148, 151, 159, 167, 175, 180, 181, 185, 200, 208, 210, 224, 237,
246, 249, 251, 256, 267, 270, 280, 284, 293, 296, 297, 299]
Centroid of the last cluster:
array([[ 84.83773804, 117.92590332, 77.32278442],
[ 86.10850525, 115.84362793, 81.91885376],
[ 86.40357208, 112.25676727, 85.72930145],
[ 86.48336792, 107.60327911, 88.13782501],
[ 86.23897552, 102.5100708 , 89.29447174],
[ 85.04563904, 97.46020508, 88.54240417],
[ 82.60240173, 93.14851379, 86.84208679],
[ 78.98937225, 89.57682037, 85.63652039],
[ 74.72344208, 86.60827637, 84.9391861 ],
[ 70.40846252, 85.15874481, 82.4484024 ],
[ 66.74534607, 86.00262451, 78.82582092],
[ 64.02451324, 88.43942261, 75.0697403 ]], dtype=float32)
`clusters` has also attributes like `centroids` (cluster representatives), and
methods like `add`, `remove`, and `clear` to modify the clustering result.
Lets first show the initial dataset.
"""
ren = fvtk.ren()
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white))
fvtk.record(ren, n_frames=1, out_path='fornix_initial.png', size=(600, 600))
"""
.. figure:: fornix_initial.png
:align: center
**Initial Fornix dataset**.
Show the centroids of the fornix after clustering (with random colors):
"""
colormap = fvtk.create_colormap(np.arange(len(clusters)))
fvtk.clear(ren)
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white, opacity=0.05))
fvtk.add(ren, fvtk.streamtube(clusters.centroids, colormap, linewidth=0.4))
fvtk.record(ren, n_frames=1, out_path='fornix_centroids.png', size=(600, 600))
"""
.. figure:: fornix_centroids.png
:align: center
**Showing the different QuickBundles centroids with random colors**.
Show the labeled fornix (colors from centroids).
"""
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
colormap_full[cluster.indices] = color
fvtk.clear(ren)
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
fvtk.record(ren, n_frames=1, out_path='fornix_clusters.png', size=(600, 600))
"""
.. figure:: fornix_clusters.png
:align: center
**Showing the different clusters**.
It is also possible to save the complete `ClusterMap` object with pickling.
"""
save_pickle('QB.pkl', clusters)
"""
Finally, here is a video of QuickBundles applied on a larger dataset.
.. raw:: html
<iframe width="420" height="315" src="http://www.youtube.com/embed/kstL7KKqu94" frameborder="0" allowfullscreen></iframe>
.. include:: ../links_names.inc
.. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
tractography simplification, Frontiers in Neuroscience, vol
6, no 175, 2012.
"""
|
{
"content_hash": "6924f76cfd9edb1cb5c6a30a172d7460",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 125,
"avg_line_length": 30.90506329113924,
"alnum_prop": 0.6768380094204383,
"repo_name": "matthieudumont/dipy",
"id": "ce79b45d1bf108473a61dd37ddac0ed90083bf38",
"size": "4883",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "doc/examples/segment_quickbundles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2844"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2944439"
}
],
"symlink_target": ""
}
|
"""
Unit tests for the MySQL implementation of spectrum libraries
"""
from os import path as os_path
import uuid
import unittest
import fourgp_speclib
from test_spectrum_library_sql import TestSpectrumLibrarySQL
# These tests require a test MySQL database to be present
db_host = "localhost"
db_user = "fourgp_unittest"
db_passwd = "fourgp_unittest"
db_name = "fourgp_unittest"
class TestSpectrumLibraryMySqlCreation(unittest.TestCase):
def test_database_creation(self):
"""
Test that we can create a new SpectrumLibrary based on an MySQL database.
"""
unique_filename = uuid.uuid4()
db_path = os_path.join("/tmp", "speclib_test_{}".format(unique_filename))
lib = fourgp_speclib.SpectrumLibraryMySql(path=db_path, create=True, purge_db=True,
db_user=db_user, db_passwd=db_passwd,
db_name=db_name, db_host=db_host)
lib.purge()
def test_multiple_libraries(self):
"""
Test that we can create multiple SpectrumLibraries at once.
"""
unique_filename_1 = uuid.uuid4()
db_path_1 = os_path.join("/tmp", "speclib_test_{}".format(unique_filename_1))
unique_filename_2 = uuid.uuid4()
db_path_2 = os_path.join("/tmp", "speclib_test_{}".format(unique_filename_2))
lib_1 = fourgp_speclib.SpectrumLibraryMySql(path=db_path_1, create=True, purge_db=True,
db_user=db_user, db_passwd=db_passwd,
db_name=db_name, db_host=db_host)
lib_2 = fourgp_speclib.SpectrumLibraryMySql(path=db_path_2, create=True,
db_user=db_user, db_passwd=db_passwd,
db_name=db_name, db_host=db_host)
lib_3 = fourgp_speclib.SpectrumLibraryMySql(path=db_path_1,
db_user=db_user, db_passwd=db_passwd,
db_name=db_name, db_host=db_host)
lib_4 = fourgp_speclib.SpectrumLibraryMySql(path=db_path_2,
db_user=db_user, db_passwd=db_passwd,
db_name=db_name, db_host=db_host)
lib_3.close()
lib_4.close()
lib_1.purge()
lib_2.purge()
def test_non_existent_database(self):
"""
Test that we get an exception if we try to open a SpectrumLibrary that doesn't exist.
"""
unique_filename = uuid.uuid4()
db_path = os_path.join("/tmp", "speclib_test_{}".format(unique_filename))
with self.assertRaises(AssertionError):
fourgp_speclib.SpectrumLibraryMySql(path=db_path, create=False, purge_db=True,
db_user=db_user, db_passwd=db_passwd,
db_name=db_name, db_host=db_host)
class TestSpectrumLibraryMySQLBinary(unittest.TestCase, TestSpectrumLibrarySQL):
def setUp(self):
"""
Open connection to a clean SpectrumLibrary based on MySQL.
"""
unique_filename = uuid.uuid4()
self._db_path = os_path.join("/tmp", "speclib_test_{}".format(unique_filename))
self._lib = fourgp_speclib.SpectrumLibraryMySql(path=self._db_path, create=True, purge_db=True,
binary_spectra=True,
db_user=db_user, db_passwd=db_passwd,
db_name=db_name, db_host=db_host)
def tearDown(self):
"""
Tear down SpectrumLibrary based on MySQL.
"""
self._lib.purge()
class TestSpectrumLibraryMySQLGzip(unittest.TestCase, TestSpectrumLibrarySQL):
def setUp(self):
"""
Open connection to a clean SpectrumLibrary based on MySQL.
"""
unique_filename = uuid.uuid4()
self._db_path = os_path.join("/tmp", "speclib_test_{}".format(unique_filename))
self._lib = fourgp_speclib.SpectrumLibraryMySql(path=self._db_path, create=True, purge_db=True,
gzip_spectra=True, binary_spectra=False,
db_user=db_user, db_passwd=db_passwd,
db_name=db_name, db_host=db_host)
def tearDown(self):
"""
Tear down SpectrumLibrary based on MySQL.
"""
self._lib.purge()
class TestSpectrumLibraryMySQLText(unittest.TestCase, TestSpectrumLibrarySQL):
def setUp(self):
"""
Open connection to a clean SpectrumLibrary based on MySQL.
"""
unique_filename = uuid.uuid4()
self._db_path = os_path.join("/tmp", "speclib_test_{}".format(unique_filename))
self._lib = fourgp_speclib.SpectrumLibraryMySql(path=self._db_path, create=True, purge_db=True,
binary_spectra=False, gzip_spectra=False,
db_user=db_user, db_passwd=db_passwd,
db_name=db_name, db_host=db_host)
def tearDown(self):
"""
Tear down SpectrumLibrary based on MySQL.
"""
self._lib.purge()
# Run tests if we are run from command line
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "76617657c89183f5750ab9b8acddb9e3",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 103,
"avg_line_length": 44.38582677165354,
"alnum_prop": 0.5306013837147419,
"repo_name": "dcf21/4most-4gp",
"id": "6c40fc1d7c332b9e64511c5a5fc49af03ad2d33a",
"size": "5685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pythonModules/fourgp_speclib/fourgp_speclib/tests/test_spectrum_library_mysql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2812"
},
{
"name": "HTML",
"bytes": "11895"
},
{
"name": "JavaScript",
"bytes": "4498"
},
{
"name": "Python",
"bytes": "462838"
}
],
"symlink_target": ""
}
|
"""
=====================================
Time-frequency beamforming using DICS
=====================================
Compute DICS source power in a grid of time-frequency windows and display
results.
The original reference is:
Dalal et al. Five-dimensional neuroimaging: Localization of the time-frequency
dynamics of cortical activity. NeuroImage (2008) vol. 40 (4) pp. 1686-1700
"""
# Author: Roman Goj <roman.goj@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.event import make_fixed_length_events
from mne.datasets import sample
from mne.time_frequency import compute_epochs_csd
from mne.beamformer import tf_dics
from mne.viz import plot_source_spectrogram
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
noise_fname = data_path + '/MEG/sample/ernoise_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
###############################################################################
# Read raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443'] # 1 bad MEG channel
# Pick a selection of magnetometer channels. A subset of all channels was used
# to speed up the example. For a solution based on all MEG channels use
# meg=True, selection=None and add mag=4e-12 to the reject dictionary.
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads',
selection=left_temporal_channels)
raw.pick_channels([raw.ch_names[pick] for pick in picks])
reject = dict(mag=4e-12)
# Re-normalize our empty-room projectors, which should be fine after
# subselection
raw.info.normalize_proj()
# Setting time windows. Note that tmin and tmax are set so that time-frequency
# beamforming will be performed for a wider range of time points than will
# later be displayed on the final spectrogram. This ensures that all time bins
# displayed represent an average of an equal number of time windows.
tmin, tmax, tstep = -0.55, 0.75, 0.05 # s
tmin_plot, tmax_plot = -0.3, 0.5 # s
# Read epochs
event_id = 1
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=None, preload=True, proj=True, reject=reject)
# Read empty room noise raw data
raw_noise = mne.io.read_raw_fif(noise_fname, preload=True)
raw_noise.info['bads'] = ['MEG 2443'] # 1 bad MEG channel
raw_noise.pick_channels([raw_noise.ch_names[pick] for pick in picks])
raw_noise.info.normalize_proj()
# Create noise epochs and make sure the number of noise epochs corresponds to
# the number of data epochs
events_noise = make_fixed_length_events(raw_noise, event_id)
epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin_plot,
tmax_plot, baseline=None, preload=True, proj=True,
reject=reject)
epochs_noise.info.normalize_proj()
epochs_noise.apply_proj()
# then make sure the number of epochs is the same
epochs_noise = epochs_noise[:len(epochs.events)]
# Read forward operator
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Read label
label = mne.read_label(fname_label)
###############################################################################
# Time-frequency beamforming based on DICS
# Setting frequency bins as in Dalal et al. 2008
freq_bins = [(4, 12), (12, 30), (30, 55), (65, 300)] # Hz
win_lengths = [0.3, 0.2, 0.15, 0.1] # s
# Then set FFTs length for each frequency range.
# Should be a power of 2 to be faster.
n_ffts = [256, 128, 128, 128]
# Subtract evoked response prior to computation?
subtract_evoked = False
# Calculating noise cross-spectral density from empty room noise for each
# frequency bin and the corresponding time window length. To calculate noise
# from the baseline period in the data, change epochs_noise to epochs
noise_csds = []
for freq_bin, win_length, n_fft in zip(freq_bins, win_lengths, n_ffts):
noise_csd = compute_epochs_csd(epochs_noise, mode='fourier',
fmin=freq_bin[0], fmax=freq_bin[1],
fsum=True, tmin=-win_length, tmax=0,
n_fft=n_fft)
noise_csds.append(noise_csd)
# Computing DICS solutions for time-frequency windows in a label in source
# space for faster computation, use label=None for full solution
stcs = tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
freq_bins=freq_bins, subtract_evoked=subtract_evoked,
n_ffts=n_ffts, reg=0.001, label=label)
# Plotting source spectrogram for source with maximum activity
# Note that tmin and tmax are set to display a time range that is smaller than
# the one for which beamforming estimates were calculated. This ensures that
# all time bins shown are a result of smoothing across an identical number of
# time windows.
plot_source_spectrogram(stcs, freq_bins, tmin=tmin_plot, tmax=tmax_plot,
source_index=None, colorbar=True)
|
{
"content_hash": "782204ddc3e4da94a1b3c319cd12514c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 42.424,
"alnum_prop": 0.6745238544220252,
"repo_name": "wronk/mne-python",
"id": "7d7191b421e53c9aa49542a72cc03659b0e426b3",
"size": "5303",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/inverse/plot_tf_dics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3769"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5079143"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from contextlib import contextmanager
import io
import os
import shutil
import stat
def get_repo(path, search_parent_directories=True):
repo = None
try:
import git
repo = git.Repo(
path, search_parent_directories=search_parent_directories
)
except ImportError:
pass
except git.InvalidGitRepositoryError:
pass
return repo
def set_exe_file(filename, set_exe=True):
IXALL = stat.S_IXOTH | stat.S_IXGRP | stat.S_IXUSR
repo = get_repo(filename)
if repo:
mode = "+x" if set_exe else "-x"
repo.git.execute(
["git", "update-index", "--chmod=%s" % mode, filename]
)
mode = os.stat(filename).st_mode
if set_exe:
mode |= IXALL
else:
mode -= mode & IXALL
os.chmod(filename, mode)
@contextmanager
def write_file(filename):
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with io.open(filename, "w", encoding="utf-8", newline="\n") as fh:
yield fh
repo = get_repo(filename)
if repo:
repo.index.add([filename])
def touch_file(filename):
with write_file(filename) as fh:
fh.write("")
def remove_file_or_dir(filename):
if not os.path.isdir(filename):
return remove_file(filename)
repo = get_repo(filename)
if repo:
repo.index.remove([filename], r=True)
shutil.rmtree(filename)
def remove_file(filename):
touch_file(filename)
repo = get_repo(filename)
if repo:
repo.index.remove([filename])
os.remove(filename)
dirname = os.path.dirname(filename)
if dirname and not os.listdir(dirname):
os.removedirs(dirname)
def copy_file(src, dst):
"""
Tried to copy utf-8 text files line-by-line to avoid
getting CRLF characters added on Windows.
If the file fails to be decoded with utf-8, we revert to a regular copy.
"""
try:
with io.open(src, "r", encoding="utf-8") as fh_src:
with io.open(dst, "w", encoding="utf-8", newline="\n") as fh_dst:
for line in fh_src:
fh_dst.write(line)
except UnicodeDecodeError:
# Leave any other files alone.
shutil.copy(src, dst)
shutil.copymode(src, dst)
repo = get_repo(dst)
if repo:
repo.index.add([dst])
|
{
"content_hash": "3985c753f4ba72f22862bdc634f08aa3",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 77,
"avg_line_length": 22.79047619047619,
"alnum_prop": 0.6071876305892185,
"repo_name": "ocefpaf/conda-smithy",
"id": "6878850af8d492af81a923f356661cce9c6a08b9",
"size": "2393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conda_smithy/feedstock_io.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "485138"
},
{
"name": "Shell",
"bytes": "13876"
}
],
"symlink_target": ""
}
|
import datetime, math
from serenity_pypeline.logger import log
from serenity_pypeline.db.influxdb_connector import InfluxDbConnector
from serenity_pypeline.filters.filter import Filter
class CfFinisherException(Exception):
pass
class CfFinisher(Filter):
KEY_FOR_DATA = 'data_to_insert'
KEY_NAME = 'measurement'
DEFAULT_CORR_PREFIX = 'cf_corr_'
DEFAULT_SEPARATOR = '_'
STATUS_CODE_SUCCESSFUL = 0
def __init__(self, conf):
super(CfFinisher, self).__init__(conf)
# TODO: type of database engine
# should be loaded from the workflow configuration file
self._dbConnector = InfluxDbConnector(conf)
self._dbConnector.connect()
self._result = None
self.node = conf['default']['node']
def run(self, **kwargs):
# TODO: 2 matrixes as result?
log.debug("CF_FINISH")
if CfFinisher.KEY_FOR_DATA in kwargs:
self._insert_data(kwargs[CfFinisher.KEY_FOR_DATA],
kwargs[CfFinisher.KEY_NAME])
return CfFinisher.STATUS_CODE_SUCCESSFUL
else:
raise CfFinisherException(
'No data for insert retrieved from a previous step. Failing...')
def _insert_data(self, data_to_insert, measurement):
result = []
for name, data in data_to_insert.iteritems():
json_record = self._create_record_json(measurement+'_'+name,
data)
result.append(json_record)
self._dbConnector.write_data(result)
def _create_record_json(self, measurement, data):
record_json = {
"measurement": measurement + '_' + self.node,
"fields": {
}
}
for i, val in enumerate(data):
record_json['fields']['f'+str(i+1)] = val
return record_json
|
{
"content_hash": "a6a18a358aea4a374e880a5607670643",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 30.098360655737704,
"alnum_prop": 0.605119825708061,
"repo_name": "Bplotka/serenity-pypeline",
"id": "462a43a718b5a96b7cff848d13ca163ffc51105d",
"size": "1836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serenity_pypeline/filters/cf_finisher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28470"
}
],
"symlink_target": ""
}
|
__all__ = ["HeapSnapshotTaker"]
from devtools_event_listener import DevToolsEventListener
from status import *
from base.log import VLOG
# Take the heap snapshot.
class HeapSnapshotTaker(DevToolsEventListener):
def __init__(self, client):
self.client = client
self.client.AddListener(self)
self.snapshot_uid = -1
self.snapshot = ""
# return status and snapshot<value>
def TakeSnapshot(self):
snapshot = None
status1 = self._TakeSnapshotInternal()
params = {}
status2 = self.client.SendCommand("Debugger.disable", params)
status3 = Status(kOk)
if self.snapshot_uid != -1:
# Clear the snapshot cached in xwalk.
status3 = self.client.SendCommand("HeapProfiler.clearProfiles", params)
status4 = Status(kOk)
if status1.IsOk() and status2.IsOk() and status3.IsOk():
try:
snapshot = json.loads(self.snapshot)
except:
status4 = Status(kUnknownError, "heap snapshot not in JSON format")
self.snapshot_uid = -1
self.snapshot = ""
if status1.IsError():
return (status1, snapshot)
elif status2.IsError():
return (status2, snapshot)
elif status3.IsError():
return (status3, snapshot)
else:
return (status4, snapshot)
# Overridden from DevToolsEventListener:
def OnEvent(self, client, method, params):
if method == "HeapProfiler.addProfileHeader":
#self.snapshot_uid = params.get("header.uid", None)
self.snapshot_uid = params["header"].get("uid", None)
if self.snapshot_uid != -1:
VLOG(3, "multiple heap snapshot triggered")
#TODO: header.uid format
elif type(params["header"].get("uid")) != int:
return Status(kUnknownError, "HeapProfiler.addProfileHeader has invalid 'header.uid'")
elif method == "HeapProfiler.addHeapSnapshotChunk":
uid = -1
uid = params.get("uid")
if type(uid) != int:
return Status(kUnknownError, "HeapProfiler.addHeapSnapshotChunk has no 'uid'")
elif uid == self.snapshot_uid:
chunk = params.get("chunk")
if type(chunk) != str:
return Status(kUnknownError, "HeapProfiler.addHeapSnapshotChunk has no 'chunk'")
self.snapshot += chunk
else:
VLOG(3, "expect chunk event uid " + self.snapshot_uid + ", but got " + str(uid))
return Status(kOk)
def _TakeSnapshotInternal(self):
if self.snapshot_uid != -1:
return Status(kUnknownError, "unexpected heap snapshot was triggered")
params = {}
kMethods = ["Debugger.enable", "HeapProfiler.collectGarbage", "HeapProfiler.takeHeapSnapshot"]
for i in kMethods:
status = self.client.SendCommand(i, params)
if status.IsError():
return status
if self.snapshot_uid == -1:
return Status(kUnknownError, "failed to receive snapshot uid")
uid_params = {}
uid_params["uid"] = self.snapshot_uid
status = self.client.SendCommand("HeapProfiler.getHeapSnapshot", uid_params)
if status.IsError():
return status
return Status(kOk)
|
{
"content_hash": "e3395a89230df8c5670cf181c8b1be94",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 98,
"avg_line_length": 36.07142857142857,
"alnum_prop": 0.6623762376237624,
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"id": "8dc3ce96b9084d85969eac71f993917d8caec70b",
"size": "3030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "browser/heap_snapshot_taker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "241573"
}
],
"symlink_target": ""
}
|
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_my_view(self):
from .views import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info['project'], 'annotran')
|
{
"content_hash": "1a84e2559c716c2ec035491f16825933",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 53,
"avg_line_length": 22.647058823529413,
"alnum_prop": 0.6415584415584416,
"repo_name": "birkbeckOLH/annotran",
"id": "5726f826e86512093903f4dc63e86e3bbf76ddaf",
"size": "385",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "annotran/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "327"
},
{
"name": "CoffeeScript",
"bytes": "60915"
},
{
"name": "HTML",
"bytes": "27928"
},
{
"name": "JavaScript",
"bytes": "88017"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "Python",
"bytes": "162015"
}
],
"symlink_target": ""
}
|
from django.core.paginator import Paginator
from django.http import Http404
from django.shortcuts import get_object_or_404, render
from wagtail.admin.forms.choosers import EmailLinkChooserForm, ExternalLinkChooserForm
from wagtail.admin.forms.search import SearchForm
from wagtail.admin.modal_workflow import render_modal_workflow
from wagtail.core import hooks
from wagtail.core.models import Page, UserPagePermissionsProxy
from wagtail.core.utils import resolve_model_string
def shared_context(request, extra_context=None):
context = {
# parent_page ID is passed as a GET parameter on the external_link and email_link views
# so that it's remembered when browsing from 'Internal link' to another link type
# and back again. On the 'browse' / 'internal link' view this will be overridden to be
# sourced from the standard URL path parameter instead.
'parent_page_id': request.GET.get('parent_page_id'),
'allow_external_link': request.GET.get('allow_external_link'),
'allow_email_link': request.GET.get('allow_email_link'),
}
if extra_context:
context.update(extra_context)
return context
def page_models_from_string(string):
page_models = []
for sub_string in string.split(','):
page_model = resolve_model_string(sub_string)
if not issubclass(page_model, Page):
raise ValueError("Model is not a page")
page_models.append(page_model)
return tuple(page_models)
def filter_page_type(queryset, page_models):
qs = queryset.none()
for model in page_models:
qs |= queryset.type(model)
return qs
def can_choose_page(page, permission_proxy, desired_classes, can_choose_root=True, user_perm=None):
"""Returns boolean indicating of the user can choose page.
will check if the root page can be selected and if user permissions
should be checked.
"""
if not issubclass(page.specific_class or Page, desired_classes) and not desired_classes == (Page, ):
return False
elif not can_choose_root and page.is_root():
return False
if user_perm == 'copy_to':
return permission_proxy.for_page(page).can_add_subpage()
return True
def browse(request, parent_page_id=None):
# A missing or empty page_type parameter indicates 'all page types'
# (i.e. descendants of wagtailcore.page)
page_type_string = request.GET.get('page_type') or 'wagtailcore.page'
user_perm = request.GET.get('user_perms', False)
try:
desired_classes = page_models_from_string(page_type_string)
except (ValueError, LookupError):
raise Http404
# Find parent page
if parent_page_id:
parent_page = get_object_or_404(Page, id=parent_page_id)
elif desired_classes == (Page,):
# Just use the root page
parent_page = Page.get_first_root_node()
else:
# Find the highest common ancestor for the specific classes passed in
# In many cases, such as selecting an EventPage under an EventIndex,
# this will help the administrator find their page quicker.
all_desired_pages = filter_page_type(Page.objects.all(), desired_classes)
parent_page = all_desired_pages.first_common_ancestor()
parent_page = parent_page.specific
# Get children of parent page
pages = parent_page.get_children().specific()
# allow hooks to modify the queryset
for hook in hooks.get_hooks('construct_page_chooser_queryset'):
pages = hook(pages, request)
# Filter them by page type
if desired_classes != (Page,):
# restrict the page listing to just those pages that:
# - are of the given content type (taking into account class inheritance)
# - or can be navigated into (i.e. have children)
choosable_pages = filter_page_type(pages, desired_classes)
descendable_pages = pages.filter(numchild__gt=0)
pages = choosable_pages | descendable_pages
can_choose_root = request.GET.get('can_choose_root', False)
# Do permission lookups for this user now, instead of for every page.
permission_proxy = UserPagePermissionsProxy(request.user)
# Parent page can be chosen if it is a instance of desired_classes
parent_page.can_choose = can_choose_page(
parent_page, permission_proxy, desired_classes, can_choose_root, user_perm)
# Pagination
# We apply pagination first so we don't need to walk the entire list
# in the block below
paginator = Paginator(pages, per_page=25)
pages = paginator.get_page(request.GET.get('p'))
# Annotate each page with can_choose/can_decend flags
for page in pages:
page.can_choose = can_choose_page(page, permission_proxy, desired_classes, can_choose_root, user_perm)
page.can_descend = page.get_children_count()
# Render
context = shared_context(request, {
'parent_page': parent_page,
'parent_page_id': parent_page.pk,
'pages': pages,
'search_form': SearchForm(),
'page_type_string': page_type_string,
'page_type_names': [desired_class.get_verbose_name() for desired_class in desired_classes],
'page_types_restricted': (page_type_string != 'wagtailcore.page')
})
return render_modal_workflow(
request,
'wagtailadmin/chooser/browse.html', None,
context,
json_data={'step': 'browse', 'parent_page_id': context['parent_page_id']},
)
def search(request, parent_page_id=None):
# A missing or empty page_type parameter indicates 'all page types' (i.e. descendants of wagtailcore.page)
page_type_string = request.GET.get('page_type') or 'wagtailcore.page'
try:
desired_classes = page_models_from_string(page_type_string)
except (ValueError, LookupError):
raise Http404
pages = Page.objects.all()
# allow hooks to modify the queryset
for hook in hooks.get_hooks('construct_page_chooser_queryset'):
pages = hook(pages, request)
search_form = SearchForm(request.GET)
if search_form.is_valid() and search_form.cleaned_data['q']:
pages = pages.exclude(
depth=1 # never include root
)
pages = filter_page_type(pages, desired_classes)
pages = pages.specific()
pages = pages.search(search_form.cleaned_data['q'])
else:
pages = pages.none()
paginator = Paginator(pages, per_page=25)
pages = paginator.get_page(request.GET.get('p'))
for page in pages:
page.can_choose = True
return render(
request, 'wagtailadmin/chooser/_search_results.html',
shared_context(request, {
'searchform': search_form,
'pages': pages,
'page_type_string': page_type_string,
})
)
def external_link(request):
initial_data = {
'url': request.GET.get('link_url', ''),
'link_text': request.GET.get('link_text', ''),
}
if request.method == 'POST':
form = ExternalLinkChooserForm(request.POST, initial=initial_data, prefix='external-link-chooser')
if form.is_valid():
result = {
'url': form.cleaned_data['url'],
'title': form.cleaned_data['link_text'].strip() or form.cleaned_data['url'],
# If the user has explicitly entered / edited something in the link_text field,
# always use that text. If not, we should favour keeping the existing link/selection
# text, where applicable.
# (Normally this will match the link_text passed in the URL here anyhow,
# but that won't account for non-text content such as images.)
'prefer_this_title_as_link_text': ('link_text' in form.changed_data),
}
return render_modal_workflow(
request, None, None,
None, json_data={'step': 'external_link_chosen', 'result': result}
)
else:
form = ExternalLinkChooserForm(initial=initial_data, prefix='external-link-chooser')
return render_modal_workflow(
request,
'wagtailadmin/chooser/external_link.html', None,
shared_context(request, {
'form': form,
}), json_data={'step': 'external_link'}
)
def email_link(request):
initial_data = {
'link_text': request.GET.get('link_text', ''),
'email_address': request.GET.get('link_url', ''),
}
if request.method == 'POST':
form = EmailLinkChooserForm(request.POST, initial=initial_data, prefix='email-link-chooser')
if form.is_valid():
result = {
'url': 'mailto:' + form.cleaned_data['email_address'],
'title': form.cleaned_data['link_text'].strip() or form.cleaned_data['email_address'],
# If the user has explicitly entered / edited something in the link_text field,
# always use that text. If not, we should favour keeping the existing link/selection
# text, where applicable.
'prefer_this_title_as_link_text': ('link_text' in form.changed_data),
}
return render_modal_workflow(
request, None, None,
None, json_data={'step': 'external_link_chosen', 'result': result}
)
else:
form = EmailLinkChooserForm(initial=initial_data, prefix='email-link-chooser')
return render_modal_workflow(
request,
'wagtailadmin/chooser/email_link.html', None,
shared_context(request, {
'form': form,
}), json_data={'step': 'email_link'}
)
|
{
"content_hash": "37a92f8ab4f2bfdee6bd220944b5dc43",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 110,
"avg_line_length": 37.75390625,
"alnum_prop": 0.6405587170201759,
"repo_name": "nealtodd/wagtail",
"id": "a0f299d4c6333ee84572e5b7c6c0325f3b5699d5",
"size": "9665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/admin/views/chooser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "190511"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "371011"
},
{
"name": "JavaScript",
"bytes": "262163"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3564287"
},
{
"name": "Shell",
"bytes": "8289"
}
],
"symlink_target": ""
}
|
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
"""Returns mimic dict mapping each word to list of words which follow it."""
mimic = {}
i = 0
with open(filename) as f:
wordlist = f.read().split()
l = len(wordlist)
for word in wordlist:
i += 1
if word in mimic:
if i < l:
mimic[word].append(wordlist[i])
else:
if i < l:
mimic[word] = [wordlist[i]]
mimic[""] = [wordlist[0]]
return mimic
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
result = ""
for i in range(200):
if word in mimic_dict:
word = random.choice(mimic_dict[word])
else:
word = random.choice(mimic_dict.keys())
result += " " + word
print result
# Provided main(), calls mimic_dict() and mimic()
def main():
if len(sys.argv) != 2:
print 'usage: ./mimic.py file-to-read'
sys.exit(1)
dict = mimic_dict(sys.argv[1])
print_mimic(dict, '')
if __name__ == '__main__':
main()
|
{
"content_hash": "c634bf9ec2d4eebbf3257566862bfaa4",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 77,
"avg_line_length": 28.10843373493976,
"alnum_prop": 0.7003857693956279,
"repo_name": "brebory/google-python-exercises",
"id": "26103eb5bc8ca52ce176dd5a18ca7a0972b1299f",
"size": "2564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/mimic.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55197"
}
],
"symlink_target": ""
}
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig(object):
"""Base configuration."""
SECRET_KEY = 'afds7fn7aw4ytnachfw84'
DEBUG = False
BCRYPT_LOG_ROUNDS = 13
WTF_CSRF_ENABLED = True
DEBUG_TB_ENABLED = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(BaseConfig):
"""Development configuration."""
DEBUG = True
BCRYPT_LOG_ROUNDS = 4
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + \
os.path.join(basedir, 'dev.sqlite')
DEBUG_TB_ENABLED = True
class TestingConfig(BaseConfig):
"""Testing configuration."""
DEBUG = True
TESTING = True
BCRYPT_LOG_ROUNDS = 4
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + \
os.path.join(basedir, 'test.sqlite')
DEBUG_TB_ENABLED = False
PRESERVE_CONTEXT_ON_EXCEPTION = False
class ProductionConfig(BaseConfig):
"""Production configuration."""
SECRET_KEY = 'my_precious'
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/example'
DEBUG_TB_ENABLED = False
|
{
"content_hash": "8760f44998a6a5eaaea58740b69b8706",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 62,
"avg_line_length": 26.790697674418606,
"alnum_prop": 0.6657986111111112,
"repo_name": "runozo/palinsesto-fire",
"id": "70007e77739bc645b179ed01f0c9d41933f32b47",
"size": "1180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/server/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "7864"
},
{
"name": "JavaScript",
"bytes": "347"
},
{
"name": "Python",
"bytes": "30174"
}
],
"symlink_target": ""
}
|
import sys, time
sys.path.append('../../')
from parsing.filing import filing
from parsing.form_parser import form_parser, ParserMissingError
from fec_alerts.utils.form_mappers import *
from write_csv_to_db import CSV_dumper
from fec_import_logging import fec_logger
from hstore_helpers import dict_to_hstore
from db_utils import get_connection
verbose = True
class FilingHeaderDoesNotExist(Exception):
pass
class FilingHeaderAlreadyProcessed(Exception):
pass
def process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id):
form = linedict['form_parser']
## Mark memo-ized rows as being superceded by an amendment.
try:
if linedict['memo_code']=='X':
linedict['superceded_by_amendment'] = True
except KeyError:
pass
#print "processing form type: %s" % (form)
if form=='SchA':
skeda_from_skedadict(linedict, filingnum, header_id, is_amended, cd)
elif form=='SchB':
skedb_from_skedbdict(linedict, filingnum, header_id, is_amended, cd)
elif form=='SchE':
skede_from_skededict(linedict, filingnum, header_id, is_amended, cd)
# Treat 48-hour contribution notices like sked A.
# Requires special handling for amendment, since these are superceded
# by regular F3 forms.
elif form=='F65':
skeda_from_f65(linedict, filingnum, header_id, is_amended, cd)
# disclosed donor to non-commmittee. Sorta rare, but..
elif form=='F56':
skeda_from_f56(linedict, filingnum, header_id, is_amended, cd)
# disclosed electioneering donor
elif form=='F92':
skeda_from_f92(linedict, filingnum, header_id, is_amended, cd)
# inaugural donors
elif form=='F132':
skeda_from_f132(linedict, filingnum, header_id, is_amended, cd)
#inaugural refunds
elif form=='F133':
skeda_from_f133(linedict, filingnum, header_id, is_amended, cd)
# IE's disclosed by non-committees. Note that they use this for * both * quarterly and 24- hour notices. There's not much consistency with this--be careful with superceding stuff.
elif form=='F57':
skede_from_f57(linedict, filingnum, header_id, is_amended, cd)
# Its another kind of line. Just dump it in Other lines.
else:
otherline_from_line(linedict, filingnum, header_id, is_amended, cd, filer_id)
def process_filing_body(filingnum, fp=None, logger=None):
#It's useful to pass the form parser in when running in bulk so we don't have to keep creating new ones.
if not fp:
fp = form_parser()
if not logger:
logger=fec_logger()
msg = "process_filing_body: Starting # %s" % (filingnum)
#print msg
logger.info(msg)
connection = get_connection()
cursor = connection.cursor()
cmd = "select fec_id, is_superceded, data_is_processed from fec_alerts_new_filing where filing_number=%s" % (filingnum)
cursor.execute(cmd)
cd = CSV_dumper(connection)
result = cursor.fetchone()
if not result:
msg = 'process_filing_body: Couldn\'t find a new_filing for filing %s' % (filingnum)
logger.error(msg)
raise FilingHeaderDoesNotExist(msg)
# will throw a TypeError if it's missing.
header_id = 1
is_amended = result[1]
is_already_processed = result[2]
if is_already_processed:
msg = 'process_filing_body: This filing has already been entered.'
logger.error(msg)
raise FilingHeaderAlreadyProcessed(msg)
#print "Processing filing %s" % (filingnum)
f1 = filing(filingnum)
form = f1.get_form_type()
version = f1.get_version()
filer_id = f1.get_filer_id()
# only parse forms that we're set up to read
if not fp.is_allowed_form(form):
if verbose:
msg = "process_filing_body: Not a parseable form: %s - %s" % (form, filingnum)
# print msg
logger.error(msg)
return None
linenum = 0
while True:
linenum += 1
row = f1.get_body_row()
if not row:
break
#print "row is %s" % (row)
#print "\n\n\nForm is %s" % form
try:
linedict = fp.parse_form_line(row, version)
#print "\n\n\nform is %s" % form
process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)
except ParserMissingError:
msg = 'process_filing_body: Unknown line type in filing %s line %s: type=%s Skipping.' % (filingnum, linenum, row[0])
logger.warn(msg)
continue
# commit all the leftovers
cd.commit_all()
cd.close()
counter = cd.get_counter()
total_rows = 0
for i in counter:
total_rows += counter[i]
msg = "process_filing_body: Filing # %s Total rows: %s Tally is: %s" % (filingnum, total_rows, counter)
# print msg
logger.info(msg)
# this data has been moved here. At some point we should pick a single location for this data.
header_data = dict_to_hstore(counter)
cmd = "update fec_alerts_new_filing set lines_present='%s'::hstore where filing_number=%s" % (header_data, filingnum)
cursor.execute(cmd)
# mark file as having been entered.
cmd = "update fec_alerts_new_filing set data_is_processed = True where filing_number=%s" % (filingnum)
cursor.execute(cmd)
# flag this filer as one who has changed.
cmd = "update summary_data_committee_overlay set is_dirty=True where fec_id='%s'" % (filer_id)
cursor.execute(cmd)
"""
t0 = time.time()
process_filing_body(864353)
# 869853, 869866
#for fn in [869888]:
# process_filing_body(fn, fp)
t1 = time.time()
print "total time = " + str(t1-t0)
# long one: 767168
#FAILS WITH STATE ADDRESS PROBLEM: biggest one on file: 838168 (510 mb) - act blue - 2012-10-18 | 2012-11-26
# second biggest: 824988 (217.3mb) - act blue - 2012-10-01 | 2012-10-17 - 874K lines
# 840327 - 169MB C00431445 - OFA | 2012-10-18 | 2012-11-26
# 821325 - 144 mb Obama for america 2012-09-01 | 2012-09-30
# 798883 - 141 mb
# 804867 - 127 mb
# 827978 - 119 mb
# 754317 - 118 mb
"""
|
{
"content_hash": "3a2b741f578c10cbd1240b3d3cc102bc",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 184,
"avg_line_length": 32.44329896907217,
"alnum_prop": 0.6266285351128058,
"repo_name": "sunlightlabs/read_FEC",
"id": "09d8dc18c5a322b49a31d57766d08e183bbd564a",
"size": "6379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fecreader/formdata/utils/filing_body_processor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27432"
},
{
"name": "HTML",
"bytes": "357960"
},
{
"name": "JavaScript",
"bytes": "129989"
},
{
"name": "Python",
"bytes": "1881514"
},
{
"name": "Shell",
"bytes": "10604"
}
],
"symlink_target": ""
}
|
import argparse
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from fairscale.optim.oss import OSS
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
import torchvision
from torchvision import datasets, transforms
import time
from torch.nn.parallel import DistributedDataParallel as DDP
import os
from onnxruntime.training.ortmodule import ORTModule, DebugOptions
import numpy as np
# Usage :
# pip install fairscale
# python3 orttraining_test_ortmodule_fairscale_sharded_optimizer.py --use_sharded_optimizer --use_ortmodule
def dist_init(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
# initialize the process group
dist.init_process_group("nccl", rank=rank, world_size=world_size)
class NeuralNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1):
out = self.fc1(input1)
out = self.relu(out)
out = self.fc2(out)
return out
def get_dataloader(args, rank, batch_size):
# Data loading code
train_dataset = torchvision.datasets.MNIST(
root=args.data_dir, train=True, transform=transforms.ToTensor(), download=True
)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=args.world_size, rank=rank
)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=0,
pin_memory=True,
sampler=train_sampler,
)
test_loader = None
if args.test_batch_size > 0:
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
args.data_dir,
train=False,
transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]),
),
batch_size=args.test_batch_size,
shuffle=True,
)
return train_loader, test_loader
def my_loss(x, target, is_train=True):
if is_train:
return torch.nn.CrossEntropyLoss()(x, target)
else:
return torch.nn.CrossEntropyLoss(reduction="sum")(x, target)
def train_step(args, model, device, optimizer, loss_fn, train_loader, epoch):
print("\n======== Epoch {:} / {:} with batch size {:} ========".format(epoch + 1, args.epochs, args.batch_size))
model.train()
# Measure how long the training epoch takes.
t0 = time.time()
start_time = t0
# Reset the total loss for this epoch.
total_loss = 0
for iteration, (data, target) in enumerate(train_loader):
if iteration == args.train_steps:
break
data, target = data.to(device), target.to(device)
data = data.reshape(data.shape[0], -1)
optimizer.zero_grad()
probability = model(data)
if args.view_graphs:
import torchviz
pytorch_backward_graph = torchviz.make_dot(probability, params=dict(list(model.named_parameters())))
pytorch_backward_graph.view()
loss = loss_fn(probability, target)
# Accumulate the training loss over all of the batches so that we can
# calculate the average loss at the end. `loss` is a Tensor containing a
# single value; the `.item()` function just returns the Python value
# from the tensor.
total_loss += loss.item()
loss.backward()
optimizer.step()
# Stats
if iteration % args.log_interval == 0:
curr_time = time.time()
elapsed_time = curr_time - start_time
print(
"[{:5}/{:5} ({:2.0f}%)]\tLoss: {:.6f}\tExecution time: {:.4f}".format(
iteration * len(data),
len(train_loader.dataset),
100.0 * iteration / len(train_loader),
loss,
elapsed_time,
)
)
start_time = curr_time
# Calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_loader)
epoch_time = time.time() - t0
print("\n Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epoch took: {:.4f}s".format(epoch_time))
return epoch_time
def test(args, model, device, loss_fn, test_loader):
model.eval()
t0 = time.time()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
data = data.reshape(data.shape[0], -1)
output = model(data)
# Stats
test_loss += loss_fn(output, target, False).item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print(
"\nTest set: Batch size: {:}, Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
args.test_batch_size,
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
)
# Report the final accuracy for this validation run.
epoch_time = time.time() - t0
accuracy = float(correct) / len(test_loader.dataset)
print(" Accuracy: {0:.2f}".format(accuracy))
print(" Validation took: {:.4f}s".format(epoch_time))
return epoch_time, accuracy
def train(rank: int, args, world_size: int, epochs: int):
# DDP init example
dist_init(rank, world_size)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Setup
if not args.cpu:
torch.cuda.set_device(rank)
torch.cuda.manual_seed(0)
torch.manual_seed(0) # also sets the cuda seed
np.random.seed(0)
# Problem statement
model = NeuralNet(input_size=784, hidden_size=500, num_classes=10).to(rank)
if args.use_ortmodule:
print("Converting to ORTModule....")
debug_options = DebugOptions(save_onnx=args.export_onnx_graphs, onnx_prefix="NeuralNet")
model = ORTModule(model, debug_options)
train_dataloader, test_dataloader = get_dataloader(args, rank, args.batch_size)
loss_fn = my_loss
base_optimizer = torch.optim.SGD # pick any pytorch compliant optimizer here
base_optimizer_arguments = (
{}
) # pass any optimizer specific arguments here, or directly below when instantiating OSS
if args.use_sharded_optimizer:
# Wrap the optimizer in its state sharding brethren
optimizer = OSS(params=model.parameters(), optim=base_optimizer, lr=args.lr)
# Wrap the model into ShardedDDP, which will reduce gradients to the proper ranks
model = ShardedDDP(model, optimizer)
else:
device_ids = None if args.cpu else [rank]
model = DDP(model, device_ids=device_ids, find_unused_parameters=False) # type: ignore
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
# Any relevant training loop, nothing specific to OSS. For example:
model.train()
total_training_time, total_test_time, epoch_0_training, validation_accuracy = 0, 0, 0, 0
for epoch in range(epochs):
total_training_time += train_step(args, model, rank, optimizer, loss_fn, train_dataloader, epoch)
if epoch == 0:
epoch_0_training = total_training_time
if args.test_batch_size > 0:
test_time, validation_accuracy = test(args, model, rank, loss_fn, test_dataloader)
total_test_time += test_time
print("\n======== Global stats ========")
if args.use_ortmodule:
estimated_export = 0
if args.epochs > 1:
estimated_export = epoch_0_training - (total_training_time - epoch_0_training) / (args.epochs - 1)
print(" Estimated ONNX export took: {:.4f}s".format(estimated_export))
else:
print(" Estimated ONNX export took: Estimate available when epochs > 1 only")
print(" Accumulated training without export took: {:.4f}s".format(total_training_time - estimated_export))
print(" Accumulated training took: {:.4f}s".format(total_training_time))
print(" Accumulated validation took: {:.4f}s".format(total_test_time))
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Benchmark the optimizer state sharding, on a typical computer vision workload"
)
parser.add_argument("--world_size", action="store", default=2, type=int)
parser.add_argument("--epochs", action="store", default=10, type=int)
parser.add_argument("--batch_size", action="store", default=256, type=int)
parser.add_argument("--lr", type=float, default=0.01, metavar="LR", help="learning rate (default: 0.01)")
parser.add_argument("--use_sharded_optimizer", action="store_true", default=False, help="use sharded optim")
parser.add_argument(
"--train-steps",
type=int,
default=-1,
metavar="N",
help="number of steps to train. Set -1 to run through whole dataset (default: -1)",
)
parser.add_argument("--view-graphs", action="store_true", default=False, help="views forward and backward graphs")
parser.add_argument(
"--export-onnx-graphs", action="store_true", default=False, help="export ONNX graphs to current directory"
)
parser.add_argument(
"--log-interval",
type=int,
default=300,
metavar="N",
help="how many batches to wait before logging training status (default: 300)",
)
parser.add_argument("--cpu", action="store_true", default=False)
parser.add_argument("--use_ortmodule", action="store_true", default=False, help="use ortmodule")
parser.add_argument(
"--test-batch-size", type=int, default=64, metavar="N", help="input batch size for testing (default: 64)"
)
parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training")
parser.add_argument("--seed", type=int, default=42, metavar="S", help="random seed (default: 42)")
parser.add_argument("--data-dir", type=str, default="./mnist", help="Path to the mnist data directory")
args = parser.parse_args()
# Supposing that WORLD_SIZE and EPOCHS are somehow defined somewhere
mp.spawn(
train,
args=(
args,
args.world_size,
args.epochs,
),
nprocs=args.world_size,
join=True,
)
|
{
"content_hash": "4f7619cf193d716b12190ea093734df7",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 118,
"avg_line_length": 37.25342465753425,
"alnum_prop": 0.6223570509284795,
"repo_name": "microsoft/onnxruntime",
"id": "e1a7dd591ec36bd035e460da6aa19a14cc187ebc",
"size": "10878",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "orttraining/orttraining/test/python/orttraining_test_ortmodule_fairscale_sharded_optimizer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1763425"
},
{
"name": "Batchfile",
"bytes": "17040"
},
{
"name": "C",
"bytes": "955390"
},
{
"name": "C#",
"bytes": "2304597"
},
{
"name": "C++",
"bytes": "39435305"
},
{
"name": "CMake",
"bytes": "514764"
},
{
"name": "CSS",
"bytes": "138431"
},
{
"name": "Cuda",
"bytes": "1104338"
},
{
"name": "Dockerfile",
"bytes": "8089"
},
{
"name": "HLSL",
"bytes": "11234"
},
{
"name": "HTML",
"bytes": "5933"
},
{
"name": "Java",
"bytes": "418665"
},
{
"name": "JavaScript",
"bytes": "212575"
},
{
"name": "Jupyter Notebook",
"bytes": "218327"
},
{
"name": "Kotlin",
"bytes": "4653"
},
{
"name": "Liquid",
"bytes": "5457"
},
{
"name": "NASL",
"bytes": "2628"
},
{
"name": "Objective-C",
"bytes": "151027"
},
{
"name": "Objective-C++",
"bytes": "107084"
},
{
"name": "Pascal",
"bytes": "9597"
},
{
"name": "PowerShell",
"bytes": "16419"
},
{
"name": "Python",
"bytes": "5041661"
},
{
"name": "Roff",
"bytes": "27539"
},
{
"name": "Ruby",
"bytes": "3545"
},
{
"name": "Shell",
"bytes": "116513"
},
{
"name": "Swift",
"bytes": "115"
},
{
"name": "TypeScript",
"bytes": "973087"
}
],
"symlink_target": ""
}
|
from EXOSIMS.Prototypes.PlanetPopulation import PlanetPopulation
from EXOSIMS.PlanetPopulation.EarthTwinHabZone1 import EarthTwinHabZone1
import numpy as np
import astropy.units as u
class EarthTwinHabZone2(EarthTwinHabZone1):
"""
Population of Earth twins (1 R_Earth, 1 M_Eearth, 1 p_Earth)
On eccentric habitable zone orbits (0.7 to 1.5 AU).
This implementation is intended to enforce this population regardless
of JSON inputs. The only inputs that will not be disregarded are erange
and constrainOrbits.
"""
def __init__(self, eta=0.1, erange=[0.0, 0.9], constrainOrbits=True, **specs):
specs["erange"] = erange
specs["constrainOrbits"] = constrainOrbits
# specs being modified in EarthTwinHabZone1
specs["eta"] = eta
specs["arange"] = [0.7, 1.5]
specs["Rprange"] = [1, 1]
specs["Mprange"] = [1, 1]
specs["prange"] = [0.367, 0.367]
specs["scaleOrbits"] = True
PlanetPopulation.__init__(self, **specs)
def gen_plan_params(self, n):
"""Generate semi-major axis (AU), eccentricity, geometric albedo, and
planetary radius (earthRad)
Semi-major axis and eccentricity are uniformly distributed with all
other parameters constant.
Args:
n (integer):
Number of samples to generate
Returns:
tuple:
a (astropy Quantity array):
Semi-major axis in units of AU
e (float ndarray):
Eccentricity
p (float ndarray):
Geometric albedo
Rp (astropy Quantity array):
Planetary radius in units of earthRad
"""
n = self.gen_input_check(n)
# generate samples of semi-major axis
ar = self.arange.to("AU").value
# check if constrainOrbits == True for eccentricity
if self.constrainOrbits:
# restrict semi-major axis limits
arcon = np.array(
[ar[0] / (1.0 - self.erange[0]), ar[1] / (1.0 + self.erange[0])]
)
a = np.random.uniform(low=arcon[0], high=arcon[1], size=n) * u.AU
tmpa = a.to("AU").value
# upper limit for eccentricity given sma
elim = np.zeros(len(a))
amean = np.mean(ar)
elim[tmpa <= amean] = 1.0 - ar[0] / tmpa[tmpa <= amean]
elim[tmpa > amean] = ar[1] / tmpa[tmpa > amean] - 1.0
elim[elim > self.erange[1]] = self.erange[1]
elim[elim < self.erange[0]] = self.erange[0]
# uniform distribution
e = np.random.uniform(low=self.erange[0], high=elim, size=n)
else:
a = np.random.uniform(low=ar[0], high=ar[1], size=n) * u.AU
e = np.random.uniform(low=self.erange[0], high=self.erange[1], size=n)
# generate geometric albedo
p = 0.367 * np.ones((n,))
# generate planetary radius
Rp = np.ones((n,)) * u.earthRad
return a, e, p, Rp
|
{
"content_hash": "d68340b3e6eb3f7cc96521648b0f8c60",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 82,
"avg_line_length": 35.870588235294115,
"alnum_prop": 0.5716628402755002,
"repo_name": "dsavransky/EXOSIMS",
"id": "2e200d5d63fdea4877d94799eac0f48b6074b6c0",
"size": "3049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EXOSIMS/PlanetPopulation/EarthTwinHabZone2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8087"
},
{
"name": "Cython",
"bytes": "2459"
},
{
"name": "Python",
"bytes": "2936469"
}
],
"symlink_target": ""
}
|
"""
The VMware API VM utility module to build SOAP object specs.
"""
import collections
import copy
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as vutil
import six
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.network import model as network_model
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vim_util
LOG = logging.getLogger(__name__)
vmware_utils_opts = [
cfg.IntOpt('console_delay_seconds',
help='Set this value if affected by an increased network '
'latency causing repeated characters when typing in '
'a remote console.'),
cfg.StrOpt('serial_port_service_uri',
help='Identifies the remote system that serial port traffic '
'will be sent to. If this is not set, no serial ports '
'will be added to the created VMs.'),
cfg.StrOpt('serial_port_proxy_uri',
help='Identifies a proxy service that provides network access '
'to the serial_port_service_uri. This option is ignored '
'if serial_port_service_uri is not specified.'),
]
CONF = cfg.CONF
CONF.register_opts(vmware_utils_opts, 'vmware')
ALL_SUPPORTED_NETWORK_DEVICES = ['VirtualE1000', 'VirtualE1000e',
'VirtualPCNet32', 'VirtualSriovEthernetCard',
'VirtualVmxnet', 'VirtualVmxnet3']
# A cache for VM references. The key will be the VM name
# and the value is the VM reference. The VM name is unique. This
# is either the UUID of the instance or UUID-rescue in the case
# that this is a rescue VM. This is in order to prevent
# unnecessary communication with the backend.
_VM_REFS_CACHE = {}
class Limits(object):
def __init__(self, limit=None, reservation=None,
shares_level=None, shares_share=None):
"""imits object holds instance limits for convenience."""
self.limit = limit
self.reservation = reservation
self.shares_level = shares_level
self.shares_share = shares_share
def validate(self):
if self.shares_level in ('high', 'normal', 'low'):
if self.shares_share:
reason = _("Share level '%s' cannot have share "
"configured") % self.shares_level
raise exception.InvalidInput(reason=reason)
return
if self.shares_level == 'custom':
return
if self.shares_level:
reason = _("Share '%s' is not supported") % self.shares_level
raise exception.InvalidInput(reason=reason)
def has_limits(self):
return bool(self.limit or
self.reservation or
self.shares_level)
class ExtraSpecs(object):
def __init__(self, cpu_limits=None, hw_version=None,
storage_policy=None, cores_per_socket=None,
memory_limits=None, disk_io_limits=None):
"""ExtraSpecs object holds extra_specs for the instance."""
if cpu_limits is None:
cpu_limits = Limits()
self.cpu_limits = cpu_limits
if memory_limits is None:
memory_limits = Limits()
self.memory_limits = memory_limits
if disk_io_limits is None:
disk_io_limits = Limits()
self.disk_io_limits = disk_io_limits
self.hw_version = hw_version
self.storage_policy = storage_policy
self.cores_per_socket = cores_per_socket
def vm_refs_cache_reset():
global _VM_REFS_CACHE
_VM_REFS_CACHE = {}
def vm_ref_cache_delete(id):
_VM_REFS_CACHE.pop(id, None)
def vm_ref_cache_update(id, vm_ref):
_VM_REFS_CACHE[id] = vm_ref
def vm_ref_cache_get(id):
return _VM_REFS_CACHE.get(id)
def _vm_ref_cache(id, func, session, data):
vm_ref = vm_ref_cache_get(id)
if not vm_ref:
vm_ref = func(session, data)
vm_ref_cache_update(id, vm_ref)
return vm_ref
def vm_ref_cache_from_instance(func):
@functools.wraps(func)
def wrapper(session, instance):
id = instance.uuid
return _vm_ref_cache(id, func, session, instance)
return wrapper
def vm_ref_cache_from_name(func):
@functools.wraps(func)
def wrapper(session, name):
id = name
return _vm_ref_cache(id, func, session, name)
return wrapper
# the config key which stores the VNC port
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
VmdkInfo = collections.namedtuple('VmdkInfo', ['path', 'adapter_type',
'disk_type',
'capacity_in_bytes',
'device'])
def _iface_id_option_value(client_factory, iface_id, port_index):
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.iface-id.%d" % port_index
opt.value = iface_id
return opt
def _get_allocation_info(client_factory, limits, allocation_type):
allocation = client_factory.create(allocation_type)
if limits.limit:
allocation.limit = limits.limit
else:
# Set as 'umlimited'
allocation.limit = -1
if limits.reservation:
allocation.reservation = limits.reservation
else:
allocation.reservation = 0
shares = client_factory.create('ns0:SharesInfo')
if limits.shares_level:
shares.level = limits.shares_level
if (shares.level == 'custom' and
limits.shares_share):
shares.shares = limits.shares_share
else:
shares.shares = 0
else:
shares.level = 'normal'
shares.shares = 0
allocation.shares = shares
return allocation
def get_vm_create_spec(client_factory, instance, data_store_name,
vif_infos, extra_specs,
os_type=constants.DEFAULT_OS_TYPE,
profile_spec=None, metadata=None):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = instance.uuid
config_spec.guestId = os_type
# The name is the unique identifier for the VM.
config_spec.instanceUuid = instance.uuid
if metadata:
config_spec.annotation = metadata
# set the Hardware version
config_spec.version = extra_specs.hw_version
# Allow nested hypervisor instances to host 64 bit VMs.
if os_type in ("vmkernel5Guest", "vmkernel6Guest", "windowsHyperVGuest"):
config_spec.nestedHVEnabled = "True"
# Append the profile spec
if profile_spec:
config_spec.vmProfile = [profile_spec]
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = int(instance.vcpus)
if extra_specs.cores_per_socket:
config_spec.numCoresPerSocket = int(extra_specs.cores_per_socket)
config_spec.memoryMB = int(instance.memory_mb)
# Configure cpu information
if extra_specs.cpu_limits.has_limits():
config_spec.cpuAllocation = _get_allocation_info(
client_factory, extra_specs.cpu_limits,
'ns0:ResourceAllocationInfo')
# Configure memory information
if extra_specs.memory_limits.has_limits():
config_spec.memoryAllocation = _get_allocation_info(
client_factory, extra_specs.memory_limits,
'ns0:ResourceAllocationInfo')
devices = []
for vif_info in vif_infos:
vif_spec = _create_vif_spec(client_factory, vif_info)
devices.append(vif_spec)
serial_port_spec = create_serial_port_spec(client_factory)
if serial_port_spec:
devices.append(serial_port_spec)
config_spec.deviceChange = devices
# add vm-uuid and iface-id.x values for Neutron
extra_config = []
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.vm-uuid"
opt.value = instance.uuid
extra_config.append(opt)
port_index = 0
for vif_info in vif_infos:
if vif_info['iface_id']:
extra_config.append(_iface_id_option_value(client_factory,
vif_info['iface_id'],
port_index))
port_index += 1
if (CONF.vmware.console_delay_seconds and
CONF.vmware.console_delay_seconds > 0):
opt = client_factory.create('ns0:OptionValue')
opt.key = 'keyboard.typematicMinDelay'
opt.value = CONF.vmware.console_delay_seconds * 1000000
extra_config.append(opt)
config_spec.extraConfig = extra_config
# Set the VM to be 'managed' by 'OpenStack'
managed_by = client_factory.create('ns0:ManagedByInfo')
managed_by.extensionKey = constants.EXTENSION_KEY
managed_by.type = constants.EXTENSION_TYPE_INSTANCE
config_spec.managedBy = managed_by
return config_spec
def create_serial_port_spec(client_factory):
"""Creates config spec for serial port."""
if not CONF.vmware.serial_port_service_uri:
return
backing = client_factory.create('ns0:VirtualSerialPortURIBackingInfo')
backing.direction = "server"
backing.serviceURI = CONF.vmware.serial_port_service_uri
backing.proxyURI = CONF.vmware.serial_port_proxy_uri
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = True
connectable_spec.connected = True
serial_port = client_factory.create('ns0:VirtualSerialPort')
serial_port.connectable = connectable_spec
serial_port.backing = backing
# we are using unique negative integers as temporary keys
serial_port.key = -2
serial_port.yieldOnPoll = True
dev_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
dev_spec.operation = "add"
dev_spec.device = serial_port
return dev_spec
def get_vm_boot_spec(client_factory, device):
"""Returns updated boot settings for the instance.
The boot order for the instance will be changed to have the
input device as the boot disk.
"""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
boot_disk = client_factory.create(
'ns0:VirtualMachineBootOptionsBootableDiskDevice')
boot_disk.deviceKey = device.key
boot_options = client_factory.create('ns0:VirtualMachineBootOptions')
boot_options.bootOrder = [boot_disk]
config_spec.bootOptions = boot_options
return config_spec
def get_vm_resize_spec(client_factory, vcpus, memory_mb, extra_specs,
metadata=None):
"""Provides updates for a VM spec."""
resize_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
resize_spec.numCPUs = vcpus
resize_spec.memoryMB = memory_mb
resize_spec.cpuAllocation = _get_allocation_info(
client_factory, extra_specs.cpu_limits,
'ns0:ResourceAllocationInfo')
if metadata:
resize_spec.annotation = metadata
return resize_spec
def create_controller_spec(client_factory, key,
adapter_type=constants.DEFAULT_ADAPTER_TYPE):
"""Builds a Config Spec for the LSI or Bus Logic Controller's addition
which acts as the controller for the virtual hard disk to be attached
to the VM.
"""
# Create a controller for the Virtual Hard Disk
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if adapter_type == constants.ADAPTER_TYPE_BUSLOGIC:
virtual_controller = client_factory.create(
'ns0:VirtualBusLogicController')
elif adapter_type == constants.ADAPTER_TYPE_LSILOGICSAS:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicSASController')
elif adapter_type == constants.ADAPTER_TYPE_PARAVIRTUAL:
virtual_controller = client_factory.create(
'ns0:ParaVirtualSCSIController')
else:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicController')
virtual_controller.key = key
virtual_controller.busNumber = 0
virtual_controller.sharedBus = "noSharing"
virtual_device_config.device = virtual_controller
return virtual_device_config
def convert_vif_model(name):
"""Converts standard VIF_MODEL types to the internal VMware ones."""
if name == network_model.VIF_MODEL_E1000:
return 'VirtualE1000'
if name == network_model.VIF_MODEL_E1000E:
return 'VirtualE1000e'
if name == network_model.VIF_MODEL_PCNET:
return 'VirtualPCNet32'
if name == network_model.VIF_MODEL_SRIOV:
return 'VirtualSriovEthernetCard'
if name == network_model.VIF_MODEL_VMXNET:
return 'VirtualVmxnet'
if name == network_model.VIF_MODEL_VMXNET3:
return 'VirtualVmxnet3'
if name not in ALL_SUPPORTED_NETWORK_DEVICES:
msg = _('%s is not supported.') % name
raise exception.Invalid(msg)
return name
def _create_vif_spec(client_factory, vif_info):
"""Builds a config spec for the addition of a new network
adapter to the VM.
"""
network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
network_spec.operation = "add"
# Keep compatible with other Hyper vif model parameter.
vif_info['vif_model'] = convert_vif_model(vif_info['vif_model'])
vif = 'ns0:' + vif_info['vif_model']
net_device = client_factory.create(vif)
# NOTE(asomya): Only works on ESXi if the portgroup binding is set to
# ephemeral. Invalid configuration if set to static and the NIC does
# not come up on boot if set to dynamic.
network_ref = vif_info['network_ref']
network_name = vif_info['network_name']
mac_address = vif_info['mac_address']
backing = None
if network_ref and network_ref['type'] == 'OpaqueNetwork':
backing = client_factory.create(
'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo')
backing.opaqueNetworkId = network_ref['network-id']
backing.opaqueNetworkType = network_ref['network-type']
elif (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
backing = client_factory.create(
'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo')
portgroup = client_factory.create(
'ns0:DistributedVirtualSwitchPortConnection')
portgroup.switchUuid = network_ref['dvsw']
portgroup.portgroupKey = network_ref['dvpg']
backing.port = portgroup
else:
backing = client_factory.create(
'ns0:VirtualEthernetCardNetworkBackingInfo')
backing.deviceName = network_name
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = True
connectable_spec.connected = True
net_device.connectable = connectable_spec
net_device.backing = backing
# The Server assigns a Key to the device. Here we pass a -ve temporary key.
# -ve because actual keys are +ve numbers and we don't
# want a clash with the key that server might associate with the device
net_device.key = -47
net_device.addressType = "manual"
net_device.macAddress = mac_address
net_device.wakeOnLanEnabled = True
network_spec.device = net_device
return network_spec
def get_network_attach_config_spec(client_factory, vif_info, index):
"""Builds the vif attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
vif_spec = _create_vif_spec(client_factory, vif_info)
config_spec.deviceChange = [vif_spec]
if vif_info['iface_id'] is not None:
config_spec.extraConfig = [_iface_id_option_value(client_factory,
vif_info['iface_id'],
index)]
return config_spec
def get_network_detach_config_spec(client_factory, device, port_index):
"""Builds the vif detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
virtual_device_config.device = device
config_spec.deviceChange = [virtual_device_config]
# If a key is already present then it cannot be deleted, only updated.
# This enables us to reuse this key if there is an additional
# attachment. The keys need to be preserved. This is due to the fact
# that there is logic on the ESX that does the network wiring
# according to these values. If they are changed then this will
# break networking to and from the interface.
config_spec.extraConfig = [_iface_id_option_value(client_factory,
'free',
port_index)]
return config_spec
def get_storage_profile_spec(session, storage_policy):
"""Gets the vm profile spec configured for storage policy."""
profile_id = pbm.get_profile_id_by_name(session, storage_policy)
if profile_id:
client_factory = session.vim.client.factory
storage_profile_spec = client_factory.create(
'ns0:VirtualMachineDefinedProfileSpec')
storage_profile_spec.profileId = profile_id.uniqueId
return storage_profile_spec
def get_vmdk_attach_config_spec(client_factory,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
controller_key=None,
unit_number=None,
device_name=None,
disk_io_limits=None):
"""Builds the vmdk attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = _create_virtual_disk_spec(client_factory,
controller_key, disk_type, file_path,
disk_size, linked_clone,
unit_number, device_name, disk_io_limits)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_cdrom_attach_config_spec(client_factory,
datastore,
file_path,
controller_key,
cdrom_unit_number):
"""Builds and returns the cdrom attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
destroy_disk=False):
"""Builds the vmdk detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = detach_virtual_disk_spec(client_factory,
device,
destroy_disk)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
"""Builds extra spec fields from a dictionary."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
# add the key value pairs
extra_config = []
for key, value in six.iteritems(extra_opts):
opt = client_factory.create('ns0:OptionValue')
opt.key = key
opt.value = value
extra_config.append(opt)
config_spec.extraConfig = extra_config
return config_spec
def _get_device_capacity(device):
# Devices pre-vSphere-5.5 only reports capacityInKB, which has
# rounding inaccuracies. Use that only if the more accurate
# attribute is absent.
if hasattr(device, 'capacityInBytes'):
return device.capacityInBytes
else:
return device.capacityInKB * units.Ki
def _get_device_disk_type(device):
if getattr(device.backing, 'thinProvisioned', False):
return constants.DISK_TYPE_THIN
else:
if getattr(device.backing, 'eagerlyScrub', False):
return constants.DISK_TYPE_EAGER_ZEROED_THICK
else:
return constants.DEFAULT_DISK_TYPE
def get_vmdk_info(session, vm_ref, uuid=None):
"""Returns information for the primary VMDK attached to the given VM."""
hardware_devices = session._call_method(vim_util,
"get_dynamic_property", vm_ref, "VirtualMachine",
"config.hardware.device")
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
vmdk_file_path = None
vmdk_controller_key = None
disk_type = None
capacity_in_bytes = 0
# Determine if we need to get the details of the root disk
root_disk = None
root_device = None
if uuid:
root_disk = '%s.vmdk' % uuid
vmdk_device = None
adapter_type_dict = {}
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
if device.backing.__class__.__name__ == \
"VirtualDiskFlatVer2BackingInfo":
path = ds_obj.DatastorePath.parse(device.backing.fileName)
if root_disk and path.basename == root_disk:
root_device = device
vmdk_device = device
elif device.__class__.__name__ == "VirtualLsiLogicController":
adapter_type_dict[device.key] = constants.DEFAULT_ADAPTER_TYPE
elif device.__class__.__name__ == "VirtualBusLogicController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_BUSLOGIC
elif device.__class__.__name__ == "VirtualIDEController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_IDE
elif device.__class__.__name__ == "VirtualLsiLogicSASController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_LSILOGICSAS
elif device.__class__.__name__ == "ParaVirtualSCSIController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_PARAVIRTUAL
if root_disk:
vmdk_device = root_device
if vmdk_device:
vmdk_file_path = vmdk_device.backing.fileName
capacity_in_bytes = _get_device_capacity(vmdk_device)
vmdk_controller_key = vmdk_device.controllerKey
disk_type = _get_device_disk_type(vmdk_device)
adapter_type = adapter_type_dict.get(vmdk_controller_key)
return VmdkInfo(vmdk_file_path, adapter_type, disk_type,
capacity_in_bytes, vmdk_device)
scsi_controller_classes = {
'ParaVirtualSCSIController': constants.ADAPTER_TYPE_PARAVIRTUAL,
'VirtualLsiLogicController': constants.DEFAULT_ADAPTER_TYPE,
'VirtualLsiLogicSASController': constants.ADAPTER_TYPE_LSILOGICSAS,
'VirtualBusLogicController': constants.ADAPTER_TYPE_PARAVIRTUAL,
}
def get_scsi_adapter_type(hardware_devices):
"""Selects a proper iscsi adapter type from the existing
hardware devices
"""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ in scsi_controller_classes:
# find the controllers which still have available slots
if len(device.device) < constants.SCSI_MAX_CONNECT_NUMBER:
# return the first match one
return scsi_controller_classes[device.__class__.__name__]
raise exception.StorageError(
reason=_("Unable to find iSCSI Target"))
def _find_controller_slot(controller_keys, taken, max_unit_number):
for controller_key in controller_keys:
for unit_number in range(max_unit_number):
if unit_number not in taken.get(controller_key, []):
return controller_key, unit_number
def _is_ide_controller(device):
return device.__class__.__name__ == 'VirtualIDEController'
def _is_scsi_controller(device):
return device.__class__.__name__ in ['VirtualLsiLogicController',
'VirtualLsiLogicSASController',
'VirtualBusLogicController',
'ParaVirtualSCSIController']
def _find_allocated_slots(devices):
"""Return dictionary which maps controller_key to list of allocated unit
numbers for that controller_key.
"""
taken = {}
for device in devices:
if hasattr(device, 'controllerKey') and hasattr(device, 'unitNumber'):
unit_numbers = taken.setdefault(device.controllerKey, [])
unit_numbers.append(device.unitNumber)
if _is_scsi_controller(device):
# the SCSI controller sits on its own bus
unit_numbers = taken.setdefault(device.key, [])
unit_numbers.append(device.scsiCtlrUnitNumber)
return taken
def allocate_controller_key_and_unit_number(client_factory, devices,
adapter_type):
"""This function inspects the current set of hardware devices and returns
controller_key and unit_number that can be used for attaching a new virtual
disk to adapter with the given adapter_type.
"""
if devices.__class__.__name__ == "ArrayOfVirtualDevice":
devices = devices.VirtualDevice
taken = _find_allocated_slots(devices)
ret = None
if adapter_type == constants.ADAPTER_TYPE_IDE:
ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)]
ret = _find_controller_slot(ide_keys, taken, 2)
elif adapter_type in [constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_PARAVIRTUAL]:
scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)]
ret = _find_controller_slot(scsi_keys, taken, 16)
if ret:
return ret[0], ret[1], None
# create new controller with the specified type and return its spec
controller_key = -101
controller_spec = create_controller_spec(client_factory, controller_key,
adapter_type)
return controller_key, 0, controller_spec
def get_rdm_disk(hardware_devices, uuid):
"""Gets the RDM disk key."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskRawDiskMappingVer1BackingInfo" and
device.backing.lunUuid == uuid):
return device
def get_vmdk_create_spec(client_factory, size_in_kb,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE):
"""Builds the virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
create_vmdk_spec.diskType = disk_type
create_vmdk_spec.capacityKb = size_in_kb
return create_vmdk_spec
def create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number):
"""Builds spec for the creation of a new Virtual CDROM to the VM."""
config_spec = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
config_spec.operation = "add"
cdrom = client_factory.create('ns0:VirtualCdrom')
cdrom_device_backing = client_factory.create(
'ns0:VirtualCdromIsoBackingInfo')
cdrom_device_backing.datastore = datastore
cdrom_device_backing.fileName = file_path
cdrom.backing = cdrom_device_backing
cdrom.controllerKey = controller_key
cdrom.unitNumber = cdrom_unit_number
cdrom.key = -1
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
cdrom.connectable = connectable_spec
config_spec.device = cdrom
return config_spec
def _create_virtual_disk_spec(client_factory, controller_key,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
unit_number=None,
device_name=None,
disk_io_limits=None):
"""Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_disk = client_factory.create('ns0:VirtualDisk')
if disk_type == "rdm" or disk_type == "rdmp":
disk_file_backing = client_factory.create(
'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
disk_file_backing.compatibilityMode = "virtualMode" \
if disk_type == "rdm" else "physicalMode"
disk_file_backing.diskMode = "independent_persistent"
disk_file_backing.deviceName = device_name or ""
else:
disk_file_backing = client_factory.create(
'ns0:VirtualDiskFlatVer2BackingInfo')
disk_file_backing.diskMode = "persistent"
if disk_type == constants.DISK_TYPE_THIN:
disk_file_backing.thinProvisioned = True
else:
if disk_type == constants.DISK_TYPE_EAGER_ZEROED_THICK:
disk_file_backing.eagerlyScrub = True
disk_file_backing.fileName = file_path or ""
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
if not linked_clone:
virtual_disk.backing = disk_file_backing
else:
virtual_disk.backing = copy.copy(disk_file_backing)
virtual_disk.backing.fileName = ""
virtual_disk.backing.parent = disk_file_backing
virtual_disk.connectable = connectable_spec
# The Server assigns a Key to the device. Here we pass a -ve random key.
# -ve because actual keys are +ve numbers and we don't
# want a clash with the key that server might associate with the device
virtual_disk.key = -100
virtual_disk.controllerKey = controller_key
virtual_disk.unitNumber = unit_number or 0
virtual_disk.capacityInKB = disk_size or 0
if disk_io_limits and disk_io_limits.has_limits():
virtual_disk.storageIOAllocation = _get_allocation_info(
client_factory, disk_io_limits,
'ns0:StorageIOAllocationInfo')
virtual_device_config.device = virtual_disk
return virtual_device_config
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
"""Builds spec for the detach of an already existing Virtual Disk from VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
if destroy_disk:
virtual_device_config.fileOperation = "destroy"
virtual_device_config.device = device
return virtual_device_config
def clone_vm_spec(client_factory, location,
power_on=False, snapshot=None, template=False, config=None):
"""Builds the VM clone spec."""
clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = location
clone_spec.powerOn = power_on
if snapshot:
clone_spec.snapshot = snapshot
if config is not None:
clone_spec.config = config
clone_spec.template = template
return clone_spec
def relocate_vm_spec(client_factory, datastore=None, host=None,
disk_move_type="moveAllDiskBackingsAndAllowSharing"):
"""Builds the VM relocation spec."""
rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
rel_spec.datastore = datastore
rel_spec.diskMoveType = disk_move_type
if host:
rel_spec.host = host
return rel_spec
def get_machine_id_change_spec(client_factory, machine_id_str):
"""Builds the machine id change config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt = client_factory.create('ns0:OptionValue')
opt.key = "machine.id"
opt.value = machine_id_str
virtual_machine_config_spec.extraConfig = [opt]
return virtual_machine_config_spec
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
port_group_name, vlan_id):
"""Builds the virtual switch port group add spec."""
vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec')
vswitch_port_group_spec.name = port_group_name
vswitch_port_group_spec.vswitchName = vswitch_name
# VLAN ID of 0 means that VLAN tagging is not to be done for the network.
vswitch_port_group_spec.vlanId = int(vlan_id)
policy = client_factory.create('ns0:HostNetworkPolicy')
nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
nicteaming.notifySwitches = True
policy.nicTeaming = nicteaming
vswitch_port_group_spec.policy = policy
return vswitch_port_group_spec
def get_vnc_config_spec(client_factory, port):
"""Builds the vnc config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt_enabled = client_factory.create('ns0:OptionValue')
opt_enabled.key = "RemoteDisplay.vnc.enabled"
opt_enabled.value = "true"
opt_port = client_factory.create('ns0:OptionValue')
opt_port.key = "RemoteDisplay.vnc.port"
opt_port.value = port
opt_keymap = client_factory.create('ns0:OptionValue')
opt_keymap.key = "RemoteDisplay.vnc.keyMap"
opt_keymap.value = CONF.vnc.keymap
extras = [opt_enabled, opt_port, opt_keymap]
virtual_machine_config_spec.extraConfig = extras
return virtual_machine_config_spec
def get_vnc_port(session):
"""Return VNC port for an VM or None if there is no available port."""
min_port = CONF.vmware.vnc_port
port_total = CONF.vmware.vnc_port_total
allocated_ports = _get_allocated_vnc_ports(session)
max_port = min_port + port_total
for port in range(min_port, max_port):
if port not in allocated_ports:
return port
raise exception.ConsolePortRangeExhausted(min_port=min_port,
max_port=max_port)
def _get_allocated_vnc_ports(session):
"""Return an integer set of all allocated VNC ports."""
# TODO(rgerganov): bug #1256944
# The VNC port should be unique per host, not per vCenter
vnc_ports = set()
result = session._call_method(vim_util, "get_objects",
"VirtualMachine", [VNC_CONFIG_KEY])
while result:
for obj in result.objects:
if not hasattr(obj, 'propSet'):
continue
dynamic_prop = obj.propSet[0]
option_value = dynamic_prop.val
vnc_port = option_value.value
vnc_ports.add(int(vnc_port))
result = session._call_method(vutil, 'continue_retrieval',
result)
return vnc_ports
def _get_object_for_value(results, value):
for object in results.objects:
if object.propSet[0].val == value:
return object.obj
def _get_object_for_optionvalue(results, value):
for object in results.objects:
if hasattr(object, "propSet") and object.propSet:
if object.propSet[0].val.value == value:
return object.obj
def _get_object_from_results(session, results, value, func):
while results:
object = func(results, value)
if object:
session._call_method(vutil, 'cancel_retrieval',
results)
return object
results = session._call_method(vutil, 'continue_retrieval',
results)
def _get_vm_ref_from_name(session, vm_name):
"""Get reference to the VM with the name specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, vm_name,
_get_object_for_value)
@vm_ref_cache_from_name
def get_vm_ref_from_name(session, vm_name):
return (_get_vm_ref_from_vm_uuid(session, vm_name) or
_get_vm_ref_from_name(session, vm_name))
def _get_vm_ref_from_uuid(session, instance_uuid):
"""Get reference to the VM with the uuid specified.
This method reads all of the names of the VM's that are running
on the backend, then it filters locally the matching
instance_uuid. It is far more optimal to use
_get_vm_ref_from_vm_uuid.
"""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_value)
def _get_vm_ref_from_vm_uuid(session, instance_uuid):
"""Get reference to the VM.
The method will make use of FindAllByUuid to get the VM reference.
This method finds all VM's on the backend that match the
instance_uuid, more specifically all VM's on the backend that have
'config_spec.instanceUuid' set to 'instance_uuid'.
"""
vm_refs = session._call_method(
session.vim,
"FindAllByUuid",
session.vim.service_content.searchIndex,
uuid=instance_uuid,
vmSearch=True,
instanceUuid=True)
if vm_refs:
return vm_refs[0]
def _get_vm_ref_from_extraconfig(session, instance_uuid):
"""Get reference to the VM with the uuid specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_optionvalue)
@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
"""Get reference to the VM through uuid or vm name."""
uuid = instance.uuid
vm_ref = (search_vm_ref_by_identifier(session, uuid) or
_get_vm_ref_from_name(session, instance.name))
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=uuid)
return vm_ref
def search_vm_ref_by_identifier(session, identifier):
"""Searches VM reference using the identifier.
This method is primarily meant to separate out part of the logic for
vm_ref search that could be use directly in the special case of
migrating the instance. For querying VM linked to an instance always
use get_vm_ref instead.
"""
vm_ref = (_get_vm_ref_from_vm_uuid(session, identifier) or
_get_vm_ref_from_extraconfig(session, identifier) or
_get_vm_ref_from_uuid(session, identifier))
return vm_ref
def get_host_ref_for_vm(session, instance):
"""Get a MoRef to the ESXi host currently running an instance."""
vm_ref = get_vm_ref(session, instance)
return session._call_method(vim_util, "get_dynamic_property",
vm_ref, "VirtualMachine", "runtime.host")
def get_host_name_for_vm(session, instance):
"""Get the hostname of the ESXi host currently running an instance."""
host_ref = get_host_ref_for_vm(session, instance)
return session._call_method(vim_util, "get_dynamic_property",
host_ref, "HostSystem", "name")
def get_vm_state(session, instance):
vm_ref = get_vm_ref(session, instance)
vm_state = session._call_method(vim_util, "get_dynamic_property",
vm_ref, "VirtualMachine", "runtime.powerState")
return vm_state
def get_stats_from_cluster(session, cluster):
"""Get the aggregate resource stats of a cluster."""
vcpus = 0
mem_info = {'total': 0, 'free': 0}
# Get the Host and Resource Pool Managed Object Refs
prop_dict = session._call_method(vim_util, "get_dynamic_properties",
cluster, "ClusterComputeResource",
["host", "resourcePool"])
if prop_dict:
host_ret = prop_dict.get('host')
if host_ret:
host_mors = host_ret.ManagedObjectReference
result = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"HostSystem", host_mors,
["summary.hardware", "summary.runtime"])
for obj in result.objects:
hardware_summary = obj.propSet[0].val
runtime_summary = obj.propSet[1].val
if (runtime_summary.inMaintenanceMode is False and
runtime_summary.connectionState == "connected"):
# Total vcpus is the sum of all pCPUs of individual hosts
# The overcommitment ratio is factored in by the scheduler
vcpus += hardware_summary.numCpuThreads
res_mor = prop_dict.get('resourcePool')
if res_mor:
res_usage = session._call_method(vim_util, "get_dynamic_property",
res_mor, "ResourcePool", "summary.runtime.memory")
if res_usage:
# maxUsage is the memory limit of the cluster available to VM's
mem_info['total'] = int(res_usage.maxUsage / units.Mi)
# overallUsage is the hypervisor's view of memory usage by VM's
consumed = int(res_usage.overallUsage / units.Mi)
mem_info['free'] = mem_info['total'] - consumed
stats = {'vcpus': vcpus, 'mem': mem_info}
return stats
def get_host_ref(session, cluster=None):
"""Get reference to a host within the cluster specified."""
if cluster is None:
results = session._call_method(vim_util, "get_objects",
"HostSystem")
session._call_method(vutil, 'cancel_retrieval',
results)
host_mor = results.objects[0].obj
else:
host_ret = session._call_method(vim_util, "get_dynamic_property",
cluster, "ClusterComputeResource",
"host")
if not host_ret or not host_ret.ManagedObjectReference:
msg = _('No host available on cluster')
raise exception.NoValidHost(reason=msg)
host_mor = host_ret.ManagedObjectReference[0]
return host_mor
def propset_dict(propset):
"""Turn a propset list into a dictionary
PropSet is an optional attribute on ObjectContent objects
that are returned by the VMware API.
You can read more about these at:
| http://pubs.vmware.com/vsphere-51/index.jsp
| #com.vmware.wssdk.apiref.doc/
| vmodl.query.PropertyCollector.ObjectContent.html
:param propset: a property "set" from ObjectContent
:return: dictionary representing property set
"""
if propset is None:
return {}
return {prop.name: prop.val for prop in propset}
def get_vmdk_backed_disk_device(hardware_devices, uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
device.backing.uuid == uuid):
return device
def get_vmdk_volume_disk(hardware_devices, path=None):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk"):
if not path or path == device.backing.fileName:
return device
def get_res_pool_ref(session, cluster):
"""Get the resource pool."""
# Get the root resource pool of the cluster
res_pool_ref = session._call_method(vim_util,
"get_dynamic_property",
cluster,
"ClusterComputeResource",
"resourcePool")
return res_pool_ref
def get_all_cluster_mors(session):
"""Get all the clusters in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ClusterComputeResource", ["name"])
session._call_method(vutil, 'cancel_retrieval',
results)
return results.objects
except Exception as excep:
LOG.warning(_LW("Failed to get cluster references %s"), excep)
def get_cluster_ref_by_name(session, cluster_name):
"""Get reference to the vCenter cluster with the specified name."""
all_clusters = get_all_cluster_mors(session)
for cluster in all_clusters:
if (hasattr(cluster, 'propSet') and
cluster.propSet[0].val == cluster_name):
return cluster.obj
def get_vmdk_adapter_type(adapter_type):
"""Return the adapter type to be used in vmdk descriptor.
Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic & ParaVirtual
because Virtual Disk Manager API does not recognize the newer controller
types.
"""
if adapter_type in [constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL]:
vmdk_adapter_type = constants.DEFAULT_ADAPTER_TYPE
else:
vmdk_adapter_type = adapter_type
return vmdk_adapter_type
def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
"""Create VM on ESX host."""
LOG.debug("Creating VM on the ESX host", instance=instance)
vm_create_task = session._call_method(
session.vim,
"CreateVM_Task", vm_folder,
config=config_spec, pool=res_pool_ref)
try:
task_info = session._wait_for_task(vm_create_task)
except vexc.VMwareDriverException:
# An invalid guestId will result in an error with no specific fault
# type and the generic error 'A specified parameter was not correct'.
# As guestId is user-editable, we try to help the user out with some
# additional information if we notice that guestId isn't in our list of
# known-good values.
# We don't check this in advance or do anything more than warn because
# we can't guarantee that our list of known-good guestIds is complete.
# Consequently, a value which we don't recognise may in fact be valid.
with excutils.save_and_reraise_exception():
if config_spec.guestId not in constants.VALID_OS_TYPES:
LOG.warning(_LW('vmware_ostype from image is not recognised: '
'\'%(ostype)s\'. An invalid os type may be '
'one cause of this instance creation failure'),
{'ostype': config_spec.guestId})
LOG.debug("Created VM on the ESX host", instance=instance)
return task_info.result
def destroy_vm(session, instance, vm_ref=None):
"""Destroy a VM instance. Assumes VM is powered off."""
try:
if not vm_ref:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Destroying the VM", instance=instance)
destroy_task = session._call_method(session.vim, "Destroy_Task",
vm_ref)
session._wait_for_task(destroy_task)
LOG.info(_LI("Destroyed the VM"), instance=instance)
except Exception:
LOG.exception(_LE('Destroy VM failed'), instance=instance)
def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
virtual_disk_path, size_in_kb):
# Create a Virtual Disk of the size of the flat vmdk file. This is
# done just to generate the meta-data file whose specifics
# depend on the size of the disk, thin/thick provisioning and the
# storage adapter type.
LOG.debug("Creating Virtual Disk of size "
"%(vmdk_file_size_in_kb)s KB and adapter type "
"%(adapter_type)s on the data store",
{"vmdk_file_size_in_kb": size_in_kb,
"adapter_type": adapter_type})
vmdk_create_spec = get_vmdk_create_spec(
session.vim.client.factory,
size_in_kb,
adapter_type,
disk_type)
vmdk_create_task = session._call_method(
session.vim,
"CreateVirtualDisk_Task",
session.vim.service_content.virtualDiskManager,
name=virtual_disk_path,
datacenter=dc_ref,
spec=vmdk_create_spec)
session._wait_for_task(vmdk_create_task)
LOG.debug("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
" KB and type %(disk_type)s",
{"vmdk_file_size_in_kb": size_in_kb,
"disk_type": disk_type})
def copy_virtual_disk(session, dc_ref, source, dest):
"""Copy a sparse virtual disk to a thin virtual disk.
This is also done to generate the meta-data file whose specifics
depend on the size of the disk, thin/thick provisioning and the
storage adapter type.
:param session: - session for connection
:param dc_ref: - data center reference object
:param source: - source datastore path
:param dest: - destination datastore path
:returns: None
"""
LOG.debug("Copying Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
vim = session.vim
vmdk_copy_task = session._call_method(
vim,
"CopyVirtualDisk_Task",
vim.service_content.virtualDiskManager,
sourceName=source,
sourceDatacenter=dc_ref,
destName=dest)
session._wait_for_task(vmdk_copy_task)
LOG.debug("Copied Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
def reconfigure_vm(session, vm_ref, config_spec):
"""Reconfigure a VM according to the config spec."""
reconfig_task = session._call_method(session.vim,
"ReconfigVM_Task", vm_ref,
spec=config_spec)
session._wait_for_task(reconfig_task)
def power_on_instance(session, instance, vm_ref=None):
"""Power on the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering on the VM", instance=instance)
try:
poweron_task = session._call_method(
session.vim,
"PowerOnVM_Task", vm_ref)
session._wait_for_task(poweron_task)
LOG.debug("Powered on the VM", instance=instance)
except vexc.InvalidPowerStateException:
LOG.debug("VM already powered on", instance=instance)
def _get_vm_port_indices(session, vm_ref):
extra_config = session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
'config.extraConfig')
ports = []
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value != 'free'):
ports.append(int(option.key.split('.')[2]))
return ports
def get_attach_port_index(session, vm_ref):
"""Get the first free port index."""
ports = _get_vm_port_indices(session, vm_ref)
# No ports are configured on the VM
if not ports:
return 0
ports.sort()
configured_ports_len = len(ports)
# Find the first free port index
for port_index in range(configured_ports_len):
if port_index != ports[port_index]:
return port_index
return configured_ports_len
def get_vm_detach_port_index(session, vm_ref, iface_id):
extra_config = session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
'config.extraConfig')
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value == iface_id):
return int(option.key.split('.')[2])
def power_off_instance(session, instance, vm_ref=None):
"""Power off the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering off the VM", instance=instance)
try:
poweroff_task = session._call_method(session.vim,
"PowerOffVM_Task", vm_ref)
session._wait_for_task(poweroff_task)
LOG.debug("Powered off the VM", instance=instance)
except vexc.InvalidPowerStateException:
LOG.debug("VM already powered off", instance=instance)
def find_rescue_device(hardware_devices, instance):
"""Returns the rescue device.
The method will raise an exception if the rescue device does not
exist. The resuce device has suffix '-rescue.vmdk'.
:param hardware_devices: the hardware devices for the instance
:param instance: nova.objects.instance.Instance object
:return: the rescue disk device object
"""
for device in hardware_devices.VirtualDevice:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
'VirtualDiskFlatVer2BackingInfo' and
device.backing.fileName.endswith('-rescue.vmdk')):
return device
msg = _('Rescue device does not exist for instance %s') % instance.uuid
raise exception.NotFound(msg)
def get_ephemeral_name(id):
return 'ephemeral_%d.vmdk' % id
def _detach_and_delete_devices_config_spec(client_factory, devices):
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
for device in devices:
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
virtual_device_config.device = device
virtual_device_config.fileOperation = "destroy"
device_config_spec.append(virtual_device_config)
config_spec.deviceChange = device_config_spec
return config_spec
def detach_devices_from_vm(session, vm_ref, devices):
"""Detach specified devices from VM."""
client_factory = session.vim.client.factory
config_spec = _detach_and_delete_devices_config_spec(
client_factory, devices)
reconfigure_vm(session, vm_ref, config_spec)
def get_ephemerals(session, vm_ref):
devices = []
hardware_devices = session._call_method(vim_util,
"get_dynamic_property", vm_ref, "VirtualMachine",
"config.hardware.device")
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
if (device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo"):
if 'ephemeral' in device.backing.fileName:
devices.append(device)
return devices
def get_swap(session, vm_ref):
hardware_devices = session._call_method(vim_util,
"get_dynamic_property", vm_ref, "VirtualMachine",
"config.hardware.device")
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
'swap' in device.backing.fileName):
return device
|
{
"content_hash": "73240943c64031364f96102b02c03734",
"timestamp": "",
"source": "github",
"line_count": 1518,
"max_line_length": 79,
"avg_line_length": 38.7266139657444,
"alnum_prop": 0.6235222072907275,
"repo_name": "isyippee/nova",
"id": "b61fac4a090e3f0494495465a29e13df501f1748",
"size": "59538",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/virt/vmwareapi/vm_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16597219"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259485"
}
],
"symlink_target": ""
}
|
from weboob.core import WebNip
import sys
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
class Connector(object):
'''
Connector is a tool that connects to common websites like bank website,
phone operator website... and that grabs personal data from there.
Credentials are required to make this operation.
Technically, connectors are weboob backend wrappers.
'''
def __init__(self, connector, parameters):
'''
Constructor: initialize connector, set up the weboob backend.
'''
weboob = WebNip()
self.backend = weboob.build_backend(connector, parameters)
def get_balances(self):
'''
Grab results returned by connector after activation.
Issue: connectors are blocking, they should not.
'''
results = []
for account in self.backend.iter_accounts():
if repr(account.iban) == "NotLoaded":
results.append({
"accountNumber": account.id,
"label": account.label,
"balance": unicode(account.balance)
})
else:
results.append({
"accountNumber": account.id,
"label": account.label,
"balance": unicode(account.balance),
"iban": unicode(account.iban)
})
return results
def get_history(self):
'''
Return accounts history. It takes all the resutl it can scrap
on the given website.
'''
results = []
for account in self.backend.iter_accounts():
try:
for history in self.backend.iter_history(account):
results.append({
"account": account.id,
"amount": str(history.amount),
"date": history.date.strftime(DATETIME_FORMAT),
"rdate": history.rdate.strftime(DATETIME_FORMAT),
"label": unicode(history.label),
"raw": unicode(history.raw),
"type": history.type
})
except NotImplementedError:
print >> sys.stderr, "The account type has not been implemented by weboob."
return results
|
{
"content_hash": "1935a6060586b52767cba07bcb0454a0",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 91,
"avg_line_length": 34.38235294117647,
"alnum_prop": 0.5286569717707442,
"repo_name": "rlustin/kresus",
"id": "a24483842fd7911791b028f89f7660799b9db68b",
"size": "2338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weboob/py/connector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22923"
},
{
"name": "CoffeeScript",
"bytes": "106013"
},
{
"name": "HTML",
"bytes": "1473"
},
{
"name": "JavaScript",
"bytes": "169696"
},
{
"name": "Makefile",
"bytes": "531"
},
{
"name": "Python",
"bytes": "6155"
},
{
"name": "Shell",
"bytes": "2724"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from tick.array_test.build.array_test import test_sum_double_pointer, \
test_sum_ArrayDouble, test_sum_SArray_shared_ptr, test_sum_VArray_shared_ptr
"""
ref_size = 10000
ref_n_loops = 10000
start = time.process_time()
ref_result = test_sum_double_pointer(ref_size, ref_n_loops)
end = time.process_time()
ref_needed_time = end - start
"""
class Test(unittest.TestCase):
"""
def test_array_speed(self):
\"""...Test speed of ArrayDouble is equivalent to a double pointer array
\"""
start = time.process_time()
result = test_sum_ArrayDouble(ref_size, ref_n_loops)
end = time.process_time()
needed_time = end - start
self.assertEqual(result, ref_result)
if needed_time > ref_needed_time:
np.testing.assert_allclose(needed_time, ref_needed_time, rtol=0.2)
def test_sarrayptr_speed(self):
\"""...Test speed of SArrayDoublePtr is equivalent to a double pointer
array
\"""
start = time.process_time()
result = test_sum_SArray_shared_ptr(ref_size, ref_n_loops)
end = time.process_time()
needed_time = end - start
self.assertEqual(result, ref_result)
if needed_time > ref_needed_time:
np.testing.assert_allclose(needed_time, ref_needed_time, rtol=0.2)
def test_varrayptr_speed(self):
\"""...Test speed of VArrayDoublePtr is equivalent to a double pointer
array
\"""
start = time.process_time()
result = test_sum_VArray_shared_ptr(ref_size, ref_n_loops)
end = time.process_time()
needed_time = end - start
self.assertEqual(result, ref_result)
if needed_time > ref_needed_time:
np.testing.assert_allclose(needed_time, ref_needed_time, rtol=0.1)
"""
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "d0af7f3b721c8ab0ffaa816a52fbcb06",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 80,
"avg_line_length": 33.80357142857143,
"alnum_prop": 0.6265187533016376,
"repo_name": "X-DataInitiative/tick",
"id": "2985ba707eb072fc2182dd2abdd183c9deb93239",
"size": "1942",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tick/array_test/tests/array_performance_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5890"
},
{
"name": "C++",
"bytes": "1246006"
},
{
"name": "CMake",
"bytes": "25186"
},
{
"name": "Dockerfile",
"bytes": "2039"
},
{
"name": "Python",
"bytes": "1492424"
},
{
"name": "SWIG",
"bytes": "192101"
},
{
"name": "Shell",
"bytes": "32367"
}
],
"symlink_target": ""
}
|
"""All URLs for the extension."""
from django.conf.urls import url
from .views import (
IndexPage,
AllWebResourcesPage,
AddWebResourcePage,
SingleWebResourcePage,
RemoveWebResourcePage,
ReorderWebResourcesAjax,
UpdateWebResourceAjax,
AllWebResourcesAPI,
SingleWebResourceAPI
)
urlpatterns = [
# ###########################
# ADMIN PAGES
# ###########################
url(
r'^admin/webresources/$',
IndexPage.as_view(),
name='index'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'webresources/$',
AllWebResourcesPage.as_view(),
name='all_webresources'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'webresources/add/$',
AddWebResourcePage.as_view(),
name='webresource_add'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'webresources/(?P<webresource_id>[0-9]+)/$',
SingleWebResourcePage.as_view(),
name='single_webresource'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'webresources/(?P<webresource_id>[0-9]+)/remove/$',
RemoveWebResourcePage.as_view(),
name='webresource_remove'),
# ###########################
# ADMIN AJAX
# ###########################
url(
r'^ajax/projects/(?P<project_id>[0-9]+)/'
r'webresources/reorder/$',
ReorderWebResourcesAjax.as_view(),
name='ajax_webresources_reorder'),
url(
r'^ajax/projects/(?P<project_id>[0-9]+)/'
r'webresources/(?P<webresource_id>[0-9]+)/$',
UpdateWebResourceAjax.as_view(),
name='ajax_webresource_update'),
# ###########################
# PUBLIC API
# ###########################
url(
r'^api/projects/(?P<project_id>[0-9]+)/'
r'webresources/$',
AllWebResourcesAPI.as_view(),
name='api_all_webresources'),
url(
r'^api/projects/(?P<project_id>[0-9]+)/'
r'webresources/(?P<webresource_id>[0-9]+)/$',
SingleWebResourceAPI.as_view(),
name='api_single_webresource')
]
|
{
"content_hash": "51e1034a8cf228987ba30a405912b1d2",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 60,
"avg_line_length": 27.649350649350648,
"alnum_prop": 0.5232503522780648,
"repo_name": "ExCiteS/geokey-webresources",
"id": "fa3260de46d7d2c9a83696707a599927a82913ff",
"size": "2129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geokey_webresources/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "26005"
},
{
"name": "Python",
"bytes": "128079"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.