from oslo_utils import uuidutils
from cinder.tests.functional import functional_helpers
from cinder.volume import configuration
class GroupsTest(functional_helpers._FunctionalTestBase):
_vol_type_name = 'functional_test_type'
_grp_type_name = 'functional_grp_test_type'
osapi_version_major = '3'
osapi_version_minor = '20'
def setUp(self):
super(GroupsTest, self).setUp()
self.volume_type = self.api.create_type(self._vol_type_name)
self.group_type = self.api.create_group_type(self._grp_type_name)
self.group1 = self.api.post_group(
{'group': {'group_type': self.group_type['id'],
'volume_types': [self.volume_type['id']]}})
def _get_flags(self):
f = super(GroupsTest, self)._get_flags()
f['volume_driver'] = (
{'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver',
'g': configuration.SHARED_CONF_GROUP})
f['default_volume_type'] = {'v': self._vol_type_name}
f['default_group_type'] = {'v': self._grp_type_name}
return f
def test_get_groups_summary(self):
"""Simple check that listing groups works."""
grps = self.api.get_groups(False)
self.assertIsNotNone(grps)
def test_get_groups(self):
"""Simple check that listing groups works."""
grps = self.api.get_groups()
self.assertIsNotNone(grps)
def test_reset_group_status(self):
"""Reset group status"""
found_group = self._poll_group_while(self.group1['id'],
['creating'])
self.assertEqual('available', found_group['status'])
self.api.reset_group(self.group1['id'],
{"reset_status": {"status": "error"}})
group = self.api.get_group(self.group1['id'])
self.assertEqual("error", group['status'])
def test_create_and_delete_group(self):
"""Creates and deletes a group."""
# Create group
created_group = self.api.post_group(
{'group': {'group_type': self.group_type['id'],
'volume_types': [self.volume_type['id']]}})
self.assertTrue(uuidutils.is_uuid_like(created_group['id']))
created_group_id = created_group['id']
# Check it's there
found_group = self._poll_group_while(created_group_id,
['creating'])
self.assertEqual(created_group_id, found_group['id'])
self.assertEqual(self.group_type['id'], found_group['group_type'])
self.assertEqual('available', found_group['status'])
# Create volume
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'group_id': created_group_id,
'volume_type': self.volume_type['id']}})
self.assertTrue(uuidutils.is_uuid_like(created_volume['id']))
created_volume_id = created_volume['id']
# Check it's there
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(self._vol_type_name, found_volume['volume_type'])
self.assertEqual(created_group_id, found_volume['group_id'])
# Wait (briefly) for creation. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['creating'])
# It should be available...
self.assertEqual('available', found_volume['status'])
# Delete the original group
self.api.delete_group(created_group_id,
{'delete': {'delete-volumes': True}})
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
found_group = self._poll_group_while(created_group_id, ['deleting'])
# Should be gone
self.assertIsNone(found_volume)
self.assertIsNone(found_group)
|
{
"content_hash": "0deb7b8efc7856d88cc07d46930e32b3",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 41.03061224489796,
"alnum_prop": 0.5901517035563293,
"repo_name": "Datera/cinder",
"id": "13f22b1795aa168501015798e75d4e47f17bf0e7",
"size": "4652",
"binary": false,
"copies": "3",
"ref": "refs/heads/datera_queens_backport",
"path": "cinder/tests/functional/test_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15242306"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
def extractStrayCats(item):
"""
'StrayCats'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
tagmap = [
('Virtual World: The Legendary Thief', 'Virtual World: The Legendary Thief', 'translated'),
('Hiraheishi wa Kako o Yumemiru', 'Hiraheishi wa Kako o Yumemiru', 'translated'),
('Ore no Isekai Shimai ga Jichou Shinai!', 'Ore no Isekai Shimai ga Jichou Shinai!', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
{
"content_hash": "9a15b8f69f5be8a9d0084f0920efb6c7",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 129,
"avg_line_length": 41,
"alnum_prop": 0.6277278562259306,
"repo_name": "fake-name/ReadableWebProxy",
"id": "129af07982dca27156e85a2a71aad79c0d69b2e0",
"size": "779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractStrayCats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
"""
This module implements functionality required by master processes, such as
starting new contexts via SSH. Its size is also restricted, since it must
be sent to any context that will be used to establish additional child
contexts.
"""
import dis
import imp
import inspect
import itertools
import logging
import os
import pkgutil
import re
import string
import sys
import threading
import types
import zlib
if not hasattr(pkgutil, 'find_loader'):
# find_loader() was new in >=2.5, but the modern pkgutil.py syntax has
# been kept intentionally 2.3 compatible so we can reuse it.
from mitogen.compat import pkgutil
import mitogen
import mitogen.core
import mitogen.minify
import mitogen.parent
from mitogen.core import b
from mitogen.core import to_text
from mitogen.core import LOG
from mitogen.core import IOLOG
imap = getattr(itertools, 'imap', map)
izip = getattr(itertools, 'izip', zip)
RLOG = logging.getLogger('mitogen.ctx')
def _stdlib_paths():
"""Return a set of paths from which Python imports the standard library.
"""
attr_candidates = [
'prefix',
'real_prefix', # virtualenv: only set inside a virtual environment.
'base_prefix', # venv: always set, equal to prefix if outside.
]
prefixes = (getattr(sys, a) for a in attr_candidates if hasattr(sys, a))
version = 'python%s.%s' % sys.version_info[0:2]
return set(os.path.abspath(os.path.join(p, 'lib', version))
for p in prefixes)
def get_child_modules(path):
it = pkgutil.iter_modules([os.path.dirname(path)])
return [to_text(name) for _, name, _ in it]
def get_core_source():
"""
Master version of parent.get_core_source().
"""
source = inspect.getsource(mitogen.core)
return mitogen.minify.minimize_source(source)
if mitogen.is_master:
# TODO: find a less surprising way of installing this.
mitogen.parent.get_core_source = get_core_source
LOAD_CONST = dis.opname.index('LOAD_CONST')
IMPORT_NAME = dis.opname.index('IMPORT_NAME')
if sys.version_info < (3, 0):
def iter_opcodes(co):
# Yield `(op, oparg)` tuples from the code object `co`.
ordit = imap(ord, co.co_code)
nextb = ordit.next
return ((c, (None
if c < dis.HAVE_ARGUMENT else
(nextb() | (nextb() << 8))))
for c in ordit)
elif sys.version_info < (3, 6):
def iter_opcodes(co):
# Yield `(op, oparg)` tuples from the code object `co`.
ordit = iter(co.co_code)
nextb = ordit.__next__
return ((c, (None
if c < dis.HAVE_ARGUMENT else
(nextb() | (nextb() << 8))))
for c in ordit)
else:
def iter_opcodes(co):
# Yield `(op, oparg)` tuples from the code object `co`.
ordit = iter(co.co_code)
nextb = ordit.__next__
# https://github.com/abarnert/cpython/blob/c095a32f/Python/wordcode.md
return ((c, nextb()) for c in ordit)
def scan_code_imports(co):
"""Given a code object `co`, scan its bytecode yielding any
``IMPORT_NAME`` and associated prior ``LOAD_CONST`` instructions
representing an `Import` statement or `ImportFrom` statement.
:return:
Generator producing `(level, modname, namelist)` tuples, where:
        * `level`: -1 for normal import, 0 for absolute import, and >0 for
          relative import.
* `modname`: Name of module to import, or from where `namelist` names
are imported.
* `namelist`: for `ImportFrom`, the list of names to be imported from
`modname`.
"""
opit = iter_opcodes(co)
opit, opit2, opit3 = itertools.tee(opit, 3)
try:
next(opit2)
next(opit3)
next(opit3)
except StopIteration:
return
for oparg1, oparg2, (op3, arg3) in izip(opit, opit2, opit3):
if op3 == IMPORT_NAME:
op2, arg2 = oparg2
op1, arg1 = oparg1
if op1 == op2 == LOAD_CONST:
yield (co.co_consts[arg1],
co.co_names[arg3],
co.co_consts[arg2] or ())
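# A minimal usage sketch (editorial addition, not part of mitogen): compile
# a snippet and list the imports its bytecode performs.
def _demo_scan_code_imports():
    co = compile('import os\nfrom os import path\n', '<demo>', 'exec')
    # On Python 3 this returns [(0, 'os', ()), (0, 'os', ('path',))]: a
    # plain Import followed by an ImportFrom naming 'path'.
    return list(scan_code_imports(co))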
class ThreadWatcher(object):
"""
    Manage threads that wait for another thread to shut down, before invoking
    `on_join()`. In CPython it seems possible to use this method to ensure a
    non-main thread is signalled when the main thread has exited, using yet
another thread as a proxy.
"""
_lock = threading.Lock()
_pid = None
_instances_by_target = {}
_thread_by_target = {}
@classmethod
def _reset(cls):
"""If we have forked since the watch dictionaries were initialized, all
that has is garbage, so clear it."""
if os.getpid() != cls._pid:
cls._pid = os.getpid()
cls._instances_by_target.clear()
cls._thread_by_target.clear()
def __init__(self, target, on_join):
self.target = target
self.on_join = on_join
@classmethod
def _watch(cls, target):
target.join()
for watcher in cls._instances_by_target[target]:
watcher.on_join()
def install(self):
self._lock.acquire()
try:
self._reset()
self._instances_by_target.setdefault(self.target, []).append(self)
if self.target not in self._thread_by_target:
self._thread_by_target[self.target] = threading.Thread(
name='mitogen.master.join_thread_async',
target=self._watch,
args=(self.target,)
)
self._thread_by_target[self.target].start()
finally:
self._lock.release()
def remove(self):
self._lock.acquire()
try:
self._reset()
lst = self._instances_by_target.get(self.target, [])
if self in lst:
lst.remove(self)
finally:
self._lock.release()
@classmethod
def watch(cls, target, on_join):
watcher = cls(target, on_join)
watcher.install()
return watcher
class LogForwarder(object):
def __init__(self, router):
self._router = router
self._cache = {}
router.add_handler(
fn=self._on_forward_log,
handle=mitogen.core.FORWARD_LOG,
)
def _on_forward_log(self, msg):
if msg.is_dead:
return
logger = self._cache.get(msg.src_id)
if logger is None:
context = self._router.context_by_id(msg.src_id)
if context is None:
LOG.error('FORWARD_LOG received from src_id %d', msg.src_id)
return
name = '%s.%s' % (RLOG.name, context.name)
self._cache[msg.src_id] = logger = logging.getLogger(name)
name, level_s, s = msg.data.decode('latin1').split('\x00', 2)
logger.log(int(level_s), '%s: %s', name, s, extra={
'mitogen_message': s,
'mitogen_context': self._router.context_by_id(msg.src_id),
'mitogen_name': name,
})
def __repr__(self):
return 'LogForwarder(%r)' % (self._router,)
_STDLIB_PATHS = _stdlib_paths()
def is_stdlib_path(path):
return any(
os.path.commonprefix((libpath, path)) == libpath
and 'site-packages' not in path
and 'dist-packages' not in path
for libpath in _STDLIB_PATHS
)
def is_stdlib_name(modname):
"""Return ``True`` if `modname` appears to come from the standard
library."""
if imp.is_builtin(modname) != 0:
return True
module = sys.modules.get(modname)
if module is None:
return False
# six installs crap with no __file__
modpath = os.path.abspath(getattr(module, '__file__', ''))
return is_stdlib_path(modpath)
class ModuleFinder(object):
def __init__(self):
        #: Import machinery is expensive, keep :py:meth:`get_module_source`
        #: results around.
self._found_cache = {}
#: Avoid repeated dependency scanning, which is expensive.
self._related_cache = {}
def __repr__(self):
return 'ModuleFinder()'
def _looks_like_script(self, path):
"""
Return :data:`True` if the (possibly extensionless) file at `path`
resembles a Python script. For now we simply verify the file contains
ASCII text.
"""
fp = open(path, 'rb')
try:
sample = fp.read(512).decode('latin-1')
return not set(sample).difference(string.printable)
finally:
fp.close()
def _py_filename(self, path):
if not path:
return None
if path[-4:] in ('.pyc', '.pyo'):
path = path.rstrip('co')
if path.endswith('.py'):
return path
if os.path.exists(path) and self._looks_like_script(path):
return path
def _get_module_via_pkgutil(self, fullname):
"""Attempt to fetch source code via pkgutil. In an ideal world, this
would be the only required implementation of get_module()."""
try:
# Pre-'import spec' this returned None, in Python3.6 it raises
# ImportError.
loader = pkgutil.find_loader(fullname)
except ImportError:
e = sys.exc_info()[1]
LOG.debug('%r._get_module_via_pkgutil(%r): %s',
self, fullname, e)
return None
IOLOG.debug('%r._get_module_via_pkgutil(%r) -> %r',
self, fullname, loader)
if not loader:
return
try:
path = self._py_filename(loader.get_filename(fullname))
source = loader.get_source(fullname)
is_pkg = loader.is_package(fullname)
except (AttributeError, ImportError):
# - Per PEP-302, get_source() and is_package() are optional,
# calling them may throw AttributeError.
# - get_filename() may throw ImportError if pkgutil.find_loader()
# picks a "parent" package's loader for some crap that's been
# stuffed in sys.modules, for example in the case of urllib3:
# "loader for urllib3.contrib.pyopenssl cannot handle
# requests.packages.urllib3.contrib.pyopenssl"
e = sys.exc_info()[1]
            LOG.debug('%r: loading %r using %r failed: %s',
                      self, fullname, loader, e)
return
if path is None or source is None:
return
if isinstance(source, mitogen.core.UnicodeType):
# get_source() returns "string" according to PEP-302, which was
# reinterpreted for Python 3 to mean a Unicode string.
source = source.encode('utf-8')
return path, source, is_pkg
def _get_module_via_sys_modules(self, fullname):
"""Attempt to fetch source code via sys.modules. This is specifically
to support __main__, but it may catch a few more cases."""
module = sys.modules.get(fullname)
LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module)
if not isinstance(module, types.ModuleType):
LOG.debug('sys.modules[%r] absent or not a regular module',
fullname)
return
path = self._py_filename(getattr(module, '__file__', ''))
if not path:
return
is_pkg = hasattr(module, '__path__')
try:
source = inspect.getsource(module)
except IOError:
# Work around inspect.getsourcelines() bug for 0-byte __init__.py
# files.
if not is_pkg:
raise
source = '\n'
if isinstance(source, mitogen.core.UnicodeType):
# get_source() returns "string" according to PEP-302, which was
# reinterpreted for Python 3 to mean a Unicode string.
source = source.encode('utf-8')
return path, source, is_pkg
get_module_methods = [_get_module_via_pkgutil,
_get_module_via_sys_modules]
def get_module_source(self, fullname):
"""Given the name of a loaded module `fullname`, attempt to find its
source code.
:returns:
Tuple of `(module path, source text, is package?)`, or ``None`` if
the source cannot be found.
"""
tup = self._found_cache.get(fullname)
if tup:
return tup
for method in self.get_module_methods:
tup = method(self, fullname)
if tup:
break
else:
tup = None, None, None
LOG.debug('get_module_source(%r): cannot find source', fullname)
self._found_cache[fullname] = tup
return tup
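    # Illustrative sketch (editorial addition): for an already-imported,
    # plain (non-package) module, the lookup behaves like
    #     path, src, is_pkg = ModuleFinder().get_module_source('mitogen.core')
    # where `path` is the .py filename, `src` is the source as bytes, and
    # `is_pkg` is False.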
def resolve_relpath(self, fullname, level):
"""Given an ImportFrom AST node, guess the prefix that should be tacked
on to an alias name to produce a canonical name. `fullname` is the name
of the module in which the ImportFrom appears."""
mod = sys.modules.get(fullname, None)
if hasattr(mod, '__path__'):
fullname += '.__init__'
if level == 0 or not fullname:
return ''
bits = fullname.split('.')
if len(bits) <= level:
# This would be an ImportError in real code.
return ''
return '.'.join(bits[:-level]) + '.'
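    # For example (a sketch): resolve_relpath('email.utils', 1) returns
    # 'email.', so an alias imported via "from . import charset" inside
    # email.utils resolves to the canonical name 'email.charset'.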
def generate_parent_names(self, fullname):
while '.' in fullname:
fullname, _, _ = fullname.rpartition('.')
yield fullname
def find_related_imports(self, fullname):
"""
        Return a list of non-stdlib modules that are directly imported by
`fullname`, plus their parents.
The list is determined by retrieving the source code of
`fullname`, compiling it, and examining all IMPORT_NAME ops.
:param fullname: Fully qualified name of an _already imported_ module
for which source code can be retrieved
:type fullname: str
"""
related = self._related_cache.get(fullname)
if related is not None:
return related
modpath, src, _ = self.get_module_source(fullname)
if src is None:
return []
maybe_names = list(self.generate_parent_names(fullname))
co = compile(src, modpath, 'exec')
for level, modname, namelist in scan_code_imports(co):
if level == -1:
modnames = [modname, '%s.%s' % (fullname, modname)]
else:
modnames = [
'%s%s' % (self.resolve_relpath(fullname, level), modname)
]
maybe_names.extend(modnames)
maybe_names.extend(
'%s.%s' % (mname, name)
for mname in modnames
for name in namelist
)
return self._related_cache.setdefault(fullname, sorted(
set(
name
for name in maybe_names
if sys.modules.get(name) is not None
and not is_stdlib_name(name)
and u'six.moves' not in name # TODO: crap
)
))
def find_related(self, fullname):
"""
Return a list of non-stdlib modules that are imported directly or
indirectly by `fullname`, plus their parents.
        This method is like :py:meth:`find_related_imports`, but it also
        recursively searches any modules which are imported by `fullname`.
:param fullname: Fully qualified name of an _already imported_ module
for which source code can be retrieved
:type fullname: str
"""
stack = [fullname]
found = set()
while stack:
name = stack.pop(0)
names = self.find_related_imports(name)
stack.extend(set(names).difference(found, stack))
found.update(names)
found.discard(fullname)
return sorted(found)
class ModuleResponder(object):
def __init__(self, router):
self._router = router
self._finder = ModuleFinder()
self._cache = {} # fullname -> pickled
self.blacklist = []
self.whitelist = ['']
router.add_handler(
fn=self._on_get_module,
handle=mitogen.core.GET_MODULE,
)
def __repr__(self):
return 'ModuleResponder(%r)' % (self._router,)
MAIN_RE = re.compile(b(r'^if\s+__name__\s*==\s*.__main__.\s*:'), re.M)
def whitelist_prefix(self, fullname):
if self.whitelist == ['']:
self.whitelist = ['mitogen']
self.whitelist.append(fullname)
def blacklist_prefix(self, fullname):
self.blacklist.append(fullname)
def neutralize_main(self, src):
"""Given the source for the __main__ module, try to find where it
begins conditional execution based on a "if __name__ == '__main__'"
guard, and remove any code after that point."""
match = self.MAIN_RE.search(src)
if match:
return src[:match.start()]
return src
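    # For example (a sketch): given
    #     src = b"x = 1\nif __name__ == '__main__':\n    run()\n"
    # neutralize_main(src) returns b"x = 1\n", so the module can be imported
    # remotely without executing its command-line entry point.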
def _make_negative_response(self, fullname):
return (fullname, None, None, None, ())
def _build_tuple(self, fullname):
if mitogen.core.is_blacklisted_import(self, fullname):
raise ImportError('blacklisted')
if fullname in self._cache:
return self._cache[fullname]
path, source, is_pkg = self._finder.get_module_source(fullname)
if source is None:
LOG.error('_build_tuple(%r): could not locate source', fullname)
tup = self._make_negative_response(fullname)
self._cache[fullname] = tup
return tup
if is_pkg:
pkg_present = get_child_modules(path)
LOG.debug('_build_tuple(%r, %r) -> %r',
path, fullname, pkg_present)
else:
pkg_present = None
if fullname == '__main__':
source = self.neutralize_main(source)
compressed = mitogen.core.Blob(zlib.compress(source, 9))
related = [
to_text(name)
for name in self._finder.find_related(fullname)
if not mitogen.core.is_blacklisted_import(self, name)
]
# 0:fullname 1:pkg_present 2:path 3:compressed 4:related
tup = (
to_text(fullname),
pkg_present,
to_text(path),
compressed,
related
)
self._cache[fullname] = tup
return tup
def _send_load_module(self, stream, fullname):
if fullname not in stream.sent_modules:
LOG.debug('_send_load_module(%r, %r)', stream, fullname)
self._router._async_route(
mitogen.core.Message.pickled(
self._build_tuple(fullname),
dst_id=stream.remote_id,
handle=mitogen.core.LOAD_MODULE,
)
)
stream.sent_modules.add(fullname)
def _send_module_load_failed(self, stream, fullname):
stream.send(
mitogen.core.Message.pickled(
(fullname, None, None, None, ()),
dst_id=stream.remote_id,
handle=mitogen.core.LOAD_MODULE,
)
)
def _send_module_and_related(self, stream, fullname):
try:
tup = self._build_tuple(fullname)
if tup[2] and is_stdlib_path(tup[2]):
# Prevent loading of 2.x<->3.x stdlib modules! This costs one
# RTT per hit, so a client-side solution is also required.
LOG.warning('%r: refusing to serve stdlib module %r',
self, fullname)
self._send_module_load_failed(stream, fullname)
return
for name in tup[4]: # related
parent, _, _ = name.partition('.')
if parent != fullname and parent not in stream.sent_modules:
# Parent hasn't been sent, so don't load submodule yet.
continue
self._send_load_module(stream, name)
self._send_load_module(stream, fullname)
except Exception:
LOG.debug('While importing %r', fullname, exc_info=True)
self._send_module_load_failed(stream, fullname)
def _on_get_module(self, msg):
if msg.is_dead:
return
LOG.debug('%r._on_get_module(%r)', self, msg.data)
stream = self._router.stream_by_id(msg.src_id)
fullname = msg.data.decode()
if fullname in stream.sent_modules:
LOG.warning('_on_get_module(): dup request for %r from %r',
fullname, stream)
self._send_module_and_related(stream, fullname)
def _send_forward_module(self, stream, context, fullname):
if stream.remote_id != context.context_id:
stream.send(
mitogen.core.Message(
data=b('%s\x00%s' % (context.context_id, fullname)),
handle=mitogen.core.FORWARD_MODULE,
dst_id=stream.remote_id,
)
)
def _forward_module(self, context, fullname):
IOLOG.debug('%r._forward_module(%r, %r)', self, context, fullname)
path = []
while fullname:
path.append(fullname)
fullname, _, _ = fullname.rpartition('.')
for fullname in reversed(path):
stream = self._router.stream_by_id(context.context_id)
self._send_module_and_related(stream, fullname)
self._send_forward_module(stream, context, fullname)
def forward_module(self, context, fullname):
self._router.broker.defer(self._forward_module, context, fullname)
class Broker(mitogen.core.Broker):
shutdown_timeout = 5.0
_watcher = None
poller_class = mitogen.parent.PREFERRED_POLLER
def __init__(self, install_watcher=True):
if install_watcher:
self._watcher = ThreadWatcher.watch(
target=threading.currentThread(),
on_join=self.shutdown,
)
super(Broker, self).__init__()
def shutdown(self):
super(Broker, self).shutdown()
if self._watcher:
self._watcher.remove()
class Router(mitogen.parent.Router):
broker_class = Broker
profiling = False
def __init__(self, broker=None, max_message_size=None):
if broker is None:
broker = self.broker_class()
if max_message_size:
self.max_message_size = max_message_size
super(Router, self).__init__(broker)
self.upgrade()
def upgrade(self):
self.id_allocator = IdAllocator(self)
self.responder = ModuleResponder(self)
self.log_forwarder = LogForwarder(self)
self.route_monitor = mitogen.parent.RouteMonitor(router=self)
self.add_handler( # TODO: cutpaste.
fn=self._on_detaching,
handle=mitogen.core.DETACHING,
persist=True,
)
def enable_debug(self):
mitogen.core.enable_debug_logging()
self.debug = True
def __enter__(self):
return self
def __exit__(self, e_type, e_val, tb):
self.broker.shutdown()
self.broker.join()
def disconnect_stream(self, stream):
self.broker.defer(stream.on_disconnect, self.broker)
def disconnect_all(self):
for stream in self._stream_by_id.values():
self.disconnect_stream(stream)
class IdAllocator(object):
def __init__(self, router):
self.router = router
self.next_id = 1
self.lock = threading.Lock()
router.add_handler(
fn=self.on_allocate_id,
handle=mitogen.core.ALLOCATE_ID,
)
def __repr__(self):
return 'IdAllocator(%r)' % (self.router,)
BLOCK_SIZE = 1000
def allocate(self):
self.lock.acquire()
try:
id_ = self.next_id
self.next_id += 1
return id_
finally:
self.lock.release()
def allocate_block(self):
self.lock.acquire()
try:
id_ = self.next_id
self.next_id += self.BLOCK_SIZE
end_id = id_ + self.BLOCK_SIZE
LOG.debug('%r: allocating [%d..%d)', self, id_, end_id)
return id_, end_id
finally:
self.lock.release()
def on_allocate_id(self, msg):
if msg.is_dead:
return
id_, last_id = self.allocate_block()
requestee = self.router.context_by_id(msg.src_id)
allocated = self.router.context_by_id(id_, msg.src_id)
LOG.debug('%r: allocating [%r..%r) to %r',
self, id_, last_id, requestee)
msg.reply((id_, last_id))
|
{
"content_hash": "2fb8165964c5c33fb2cb8c8f83d5e673",
"timestamp": "",
"source": "github",
"line_count": 767,
"max_line_length": 79,
"avg_line_length": 32.559322033898304,
"alnum_prop": 0.5625675729788171,
"repo_name": "edwinsteele/biblebox-pi",
"id": "d057f7f19d9f2b90223fc889d9a53ed5f7ca9b59",
"size": "26481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ansible/plugins/mitogen-0.2.2/mitogen/master.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1111"
},
{
"name": "HCL",
"bytes": "5190"
},
{
"name": "HTML",
"bytes": "23308"
},
{
"name": "JavaScript",
"bytes": "12906"
},
{
"name": "PHP",
"bytes": "2338"
},
{
"name": "Python",
"bytes": "7021"
},
{
"name": "Ruby",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "14191"
}
],
"symlink_target": ""
}
|
"""
@brief test log(time=33s)
"""
import os
import unittest
import platform
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.ipythonhelper import test_notebook_execution_coverage
import cpyquickhelper
class TestRunNotebooksPython(ExtTestCase):
@unittest.skipIf(platform.system().lower() == "darwin",
reason="no openmp")
@unittest.skipIf(platform.system().lower() == "windows",
reason="bug in pytest")
def test_run_notebooks_branching(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
import jyquickhelper as jyq
self.assertNotEmpty(jyq)
self.assertNotEmpty(cpyquickhelper)
folder = os.path.join(os.path.dirname(__file__),
"..", "..", "_doc", "notebooks")
test_notebook_execution_coverage(
__file__, "branching", folder, 'cpyquickhelper', fLOG=fLOG)
@unittest.skipIf(platform.system().lower() == "windows",
reason="bug in pytest")
def test_run_notebooks_nobranching(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
import jyquickhelper as jyq
self.assertNotEmpty(jyq)
self.assertNotEmpty(cpyquickhelper)
folder = os.path.join(os.path.dirname(__file__),
"..", "..", "_doc", "notebooks")
test_notebook_execution_coverage(
__file__, "", folder, 'cpyquickhelper', fLOG=fLOG,
filter_name=lambda name: "branching" not in name)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "3e7a2fece0acf57b79b82b37e66e352a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 72,
"avg_line_length": 31.8,
"alnum_prop": 0.5786163522012578,
"repo_name": "sdpython/cpyquickhelper",
"id": "cc2b6491077a14d7d44ca5760b710a92ddab5f8f",
"size": "1773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_unittests/ut_documentation/test_run_notebooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "918"
},
{
"name": "C",
"bytes": "538"
},
{
"name": "C++",
"bytes": "226219"
},
{
"name": "CSS",
"bytes": "1378"
},
{
"name": "Cython",
"bytes": "8128"
},
{
"name": "HTML",
"bytes": "225"
},
{
"name": "Python",
"bytes": "1086166"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class ResourceUpdate(Model):
"""The Resource model definition.
:param tags: Resource tags
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(ResourceUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
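# A brief usage sketch (editorial addition, not part of the generated SDK
# file): tags are passed through **kwargs and default to None.
def _demo_resource_update():
    update = ResourceUpdate(tags={'environment': 'dev'})
    assert update.tags == {'environment': 'dev'}
    return update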
|
{
"content_hash": "05b92bffcd605a0160a6abcaee128765",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 54,
"avg_line_length": 23.176470588235293,
"alnum_prop": 0.5786802030456852,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "182590bbdd32fc7877b43645f319e68ed7cf4bd1",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/resource_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import theano.tensor as T
class JointAE(object):
def __init__(self, num_vis, num_hiddens, layer_types, autoencoder_act_funcs,
output_act_func, initializer,
cost_class, tie_weights,
init_params, **kargs):
self.hidden_layers = []
self.params = []
self.num_hidden_layers = len(num_hiddens)
self.x = T.matrix('x')
self.cost_func = cost_class
self.tie_weights = tie_weights
param_idx = 0
for layer_idx in xrange(self.num_hidden_layers):
if init_params is not None:
if tie_weights:
We = init_params[param_idx]
be = init_params[param_idx+1]
bd = init_params[param_idx+2]
param_idx += 3
else:
We = init_params[param_idx]
Wd = init_params[param_idx+1]
be = init_params[param_idx+2]
bd = init_params[param_idx+3]
param_idx += 4
else:
We = None
Wd = None
be = None
bd = None
if layer_idx == 0:
layer_input = self.x
num_input = num_vis
num_output = num_hiddens[layer_idx]
act_funcs = [autoencoder_act_funcs[layer_idx],
output_act_func]
else:
layer_input = self.hidden_layers[-1].encoder.output
num_input = num_hiddens[layer_idx-1]
num_output = num_hiddens[layer_idx]
act_funcs = [autoencoder_act_funcs[layer_idx],
autoencoder_act_funcs[layer_idx-1]]
layer = layer_types[layer_idx](
#input = layer_input,
n_in = num_input,
n_hid = num_output,
init = initializer,
act_func = act_funcs,
tie_weights = tie_weights,
We = We,
Wd = Wd,
be = be,
bd = bd,
**kargs)
self.hidden_layers.append(layer)
self.params.extend(layer.params)
def get_reconstruction(self, x):
num_layers = len(self.hidden_layers)
input = x
for layer_idx in xrange(num_layers):
output = self.hidden_layers[layer_idx].encoder.fprop(input)
input = output
rec = output
for layer_idx in xrange(num_layers-1, -1, -1):
output = self.hidden_layers[layer_idx].decoder.fprop(rec)
rec = output
return rec
def get_cost(self):
reconstructed = self.get_reconstruction(self.x)
C = self.cost_func(reconstructed, self.x)
rec_cost = C.get_cost()
# ae_cost = T.scalar('ae_cost')
# for i in xrange(len(self.hidden_layers)):
# ae_cost += self.hidden_layers[i].cost()
return rec_cost #+ ae_cost
|
{
"content_hash": "d46f3272b9eb2c4ea7c48c7837db24dc",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 80,
"avg_line_length": 35.44565217391305,
"alnum_prop": 0.4446488807114382,
"repo_name": "ybzhou/JointAE",
"id": "2ef627aba6f3c74e970fb0752639e9da440b0763",
"size": "3261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JointAE.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17776"
}
],
"symlink_target": ""
}
|
"""
Get frequency data on 2-grams
"""
import conceptual_spaces
conceptual_spaces.bigram_data("POS_ONLY", "Round2")
|
{
"content_hash": "c7959528f212054d0b342d1773343da1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 51,
"avg_line_length": 16.714285714285715,
"alnum_prop": 0.7264957264957265,
"repo_name": "hmillerbakewell/studious-octo-invention",
"id": "691dd592ae2660fe2d5793e9bdbf10bb89d2c44a",
"size": "117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/Round2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14680"
}
],
"symlink_target": ""
}
|
from jenkinsflow.flow import serial
from .framework import api_select
def test_multi_level_mixed(api_type):
with api_select.api(__file__, api_type) as api:
api.flow_job()
_params = (('password', '', 'Some password'), ('s1', '', 'Some string argument'))
api.job('job-1', max_fails=0, expect_invocations=1, expect_order=1, params=_params)
api.job('job-2', max_fails=0, expect_invocations=1, expect_order=2, params=_params, serial=True)
api.job('job-3', max_fails=0, expect_invocations=1, expect_order=3, params=_params)
api.job('job-4', max_fails=0, expect_invocations=1, expect_order=3, params=_params)
api.job('job-5', max_fails=0, expect_invocations=1, expect_order=3, params=_params)
api.job('job-6', max_fails=0, expect_invocations=1, expect_order=4, params=_params)
api.job('job-7', max_fails=0, expect_invocations=1, expect_order=5, params=_params, serial=True)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=1) as ctrl1:
ctrl1.invoke('job-1', password='a', s1='b')
ctrl1.invoke('job-2', password='a', s1='b')
with ctrl1.parallel(timeout=40, report_interval=3) as ctrl2:
with ctrl2.serial(timeout=40, report_interval=3) as ctrl3a:
ctrl3a.invoke('job-3', password='a', s1='b')
ctrl3a.invoke('job-6', password='a', s1='b')
with ctrl2.parallel(timeout=40, report_interval=3) as ctrl3b:
ctrl3b.invoke('job-4', password='a', s1='b')
ctrl3b.invoke('job-5', password='a', s1='b')
ctrl1.invoke('job-7', password='a', s1='b')
|
{
"content_hash": "175948da37764a5df566e64ab41c954b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 104,
"avg_line_length": 56.9,
"alnum_prop": 0.6051552431165788,
"repo_name": "lhupfeldt/jenkinsflow",
"id": "8a2181c2555b62c89bd3075b997fb93470359fa4",
"size": "1845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/multi_level_mixed_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "399"
},
{
"name": "HTML",
"bytes": "992"
},
{
"name": "JavaScript",
"bytes": "1410"
},
{
"name": "Makefile",
"bytes": "22"
},
{
"name": "Python",
"bytes": "358208"
},
{
"name": "Shell",
"bytes": "564"
}
],
"symlink_target": ""
}
|
import logging
import json
import requests
import xmltodict
import random
import string
import hashlib
# django modules
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import timezone
# local modules
from app import models
from app.utils import random_string, get_request_ip, get_server_host
__all__ = [
"make_nonce_str",
"wx_dict2xml",
"wx_signature",
"wx_sign_for_pay",
"wx_get_token",
"wx_get_jsapi_ticket",
"wx_pay_unified_order",
"wx_pay_order_query",
"resolve_wx_pay_notify",
"wx_send_tpl_msg",
"WX_SUCCESS",
"WX_FAIL",
"WX_PAYERROR",
"WX_AUTH_URL",
"WX_GET_OPENID_RUL",
"WX_TPL_MSG_URL",
]
logger = logging.getLogger('app')
_WX_PAY_UNIFIED_ORDER_LOG_FMT = 'weixin_pay_unified_order return: [{code}] {msg}.'
_WX_PAY_QUERY_ORDER_LOG_FMT = 'weixin_pay_query_order return: [{code}] {msg}.'
_WX_PAY_RESULT_NOTIFY_LOG_FMT = 'weixin_pay_result_notify return: [{code}] {msg}.'
WX_SUCCESS = 'SUCCESS'
WX_FAIL = 'FAIL'
WX_PAYERROR = 'PAYERROR'
WX_AUTH_URL = 'https://open.weixin.qq.com/connect/oauth2/authorize?appid='+settings.WEIXIN_APPID
WX_GET_OPENID_RUL = 'https://api.weixin.qq.com/sns/oauth2/access_token?grant_type=authorization_code'
# WeChat template message
WX_TPL_MSG_URL = 'https://api.weixin.qq.com/cgi-bin/message/template/send?access_token={token}'
def make_nonce_str():
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def wx_dict2xml(d):
return xmltodict.unparse({'xml': d}, full_document=False)
def wx_xml2dict(xmlstr):
return xmltodict.parse(xmlstr)['xml']
def wx_signature(data):
    content = '&'.join(['%s=%s' % (key.lower(), data[key]) for key in sorted(data) if data[key] is not None and data[key] != ''])
    return hashlib.sha1(content.encode('utf-8')).hexdigest()
def wx_sign_for_pay(params):
    content = '&'.join(['%s=%s' % (key, params[key]) for key in sorted(params) if params[key] is not None and params[key] != ''])
content += '&key=' + settings.WEIXIN_KEY
return hashlib.md5(content.encode('utf-8')).hexdigest().upper()
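# A minimal signing sketch (editorial addition, not part of the original
# module; the parameter values are hypothetical and settings.WEIXIN_KEY must
# be configured). It exercises the algorithm above: sort the keys, join
# key=value pairs with '&', append the merchant key, and upper-case the MD5
# hex digest, per the WeChat Pay signing convention.
def _demo_wx_sign_for_pay():
    params = {'appid': 'wx0123456789abcdef', 'mch_id': '10000100',
              'nonce_str': make_nonce_str(), 'total_fee': 1}
    return wx_sign_for_pay(params)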
def wx_get_token():
wx_url = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential'
wx_url += '&appid=' + settings.WEIXIN_APPID
wx_url += '&secret=' + settings.WEIXIN_APP_SECRET
req = requests.get(wx_url)
if req.status_code == 200:
ret = json.loads(req.text)
if "access_token" in ret:
models.WeiXinToken.objects.all().delete()
wxToken = models.WeiXinToken(token=ret['access_token'], token_type=models.WeiXinToken.ACCESS_TOKEN)
if "expires_in" in ret:
wxToken.expires_in = ret['expires_in']
wxToken.save()
return {'ok': True, 'token': ret['access_token'], 'code': 0}
        else:
            return {'ok': False, 'msg': 'Failed to fetch WeChat token!', 'code': -1}
    else:
        return {'ok': False, 'msg': 'Failed to fetch WeChat token, please contact the administrator!', 'code': -1}
def wx_get_jsapi_ticket(access_token):
wx_url = 'https://api.weixin.qq.com/cgi-bin/ticket/getticket?access_token={token}&type=jsapi'\
.format(token=access_token)
req = requests.get(wx_url)
if req.status_code == 200:
ret = json.loads(req.text)
if "ticket" in ret:
models.WeiXinToken.objects.filter(token_type=models.WeiXinToken.JSAPI_TICKET).delete()
tk_obj = models.WeiXinToken(token_type=models.WeiXinToken.JSAPI_TICKET)
tk_obj.token = ret['ticket']
if "expires_in" in ret:
tk_obj.expires_in = ret['expires_in']
tk_obj.save()
return {'ok': True, 'ticket': ret['ticket'], 'code': 0}
        else:
            return {'ok': False, 'msg': 'Failed to fetch WeChat jsapi_ticket!', 'code': -1}
    else:
        return {'ok': False, 'msg': 'Failed to fetch WeChat jsapi_ticket, please contact the administrator!', 'code': -1}
def wx_pay_unified_order(order, request, wx_openid):
"""
参考: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_1
"""
wx_url = 'https://api.mch.weixin.qq.com/pay/unifiedorder'
params = {}
params['appid'] = settings.WEIXIN_APPID
params['mch_id'] = settings.WEIXIN_MERCHANT_ID
    params['device_info'] = 'WEB'  # terminal device ID (store or cashier device); pass "WEB" for PC web or Official Account payments
    params['nonce_str'] = make_nonce_str()
    params['body'] = '课程购买'  # "Course purchase"
    # params['detail'] = ''  # not required
    # params['attach'] = ''  # not required
    # params['fee_type'] = 'CNY'  # not required, defaults to CNY (RMB)
    params['out_trade_no'] = order.order_id  # ID recorded by the Order model
    params['total_fee'] = order.to_pay  # total order amount, in fen (1/100 yuan)
    sp_ip = get_request_ip(request)  # client IP for APP/web payments; for Native payments, the IP of the machine calling the WeChat Pay API
if not sp_ip:
sp_ip = '8.8.8.8'
_c = sp_ip.find(',')
if _c > -1:
sp_ip = sp_ip[0:_c]
params['spbill_create_ip'] = sp_ip
    # params['time_start'] = ''  # not required, yyyyMMddHHmmss
    # params['time_expire'] = ''  # not required, yyyyMMddHHmmss
    # params['goods_tag'] = ''  # not required, parameter for voucher / instant-discount features
    # params['product_id'] = ''  # not required
    # params['limit_pay'] = ''  # not required, no_credit -- disallow paying by credit card
    # TODO: callback URL that receives WeChat Pay's asynchronous payment notification
    params['notify_url'] = get_server_host(request) + reverse('wechat:wx_pay_notify')
    params['trade_type'] = 'JSAPI'  # JSAPI, NATIVE, APP
    params['openid'] = wx_openid  # required when trade_type=JSAPI; the user's unique ID under the merchant appid
    # Signature
params['sign'] = wx_sign_for_pay(params)
logger.debug(params)
req_xml_str = wx_dict2xml(params)
resp = requests.post(wx_url, data=req_xml_str.encode('utf-8'))
if resp.status_code == 200:
resp_dict = wx_xml2dict(resp.content.decode('utf-8'))
logger.debug(resp_dict)
return_code = resp_dict['return_code']
if return_code != WX_SUCCESS:
msg = resp_dict['return_msg']
logger.error(params)
logger.error(_WX_PAY_UNIFIED_ORDER_LOG_FMT.format(code=return_code, msg=msg))
return {'ok': False, 'msg': msg, 'code': 1}
given_resp_sign = resp_dict.pop('sign', None)
calculated_resp_sign = wx_sign_for_pay(resp_dict)
logger.debug('check wx_pay_order sign: '+str(given_resp_sign==calculated_resp_sign))
        if given_resp_sign != calculated_resp_sign:
            return {'ok': False, 'msg': 'Signature verification failed'}
result_code = resp_dict['result_code']
if result_code != WX_SUCCESS:
msg = resp_dict['err_code_des']
logger.error(params)
logger.error(_WX_PAY_UNIFIED_ORDER_LOG_FMT.format(code=resp_dict['err_code'], msg=msg))
return {'ok': False, 'msg': msg, 'code': 1}
# prepay_id = resp_dict['prepay_id']
# print(prepay_id)
logger.info(_WX_PAY_UNIFIED_ORDER_LOG_FMT.format(code=return_code, msg=''))
# create charge object
_create_charge_object(params, order, resp_dict)
return {'ok': True, 'msg': '', 'code': 0, 'data': resp_dict}
else:
        return {'ok': False, 'msg': 'Network request failed!', 'code': -1}
def _create_charge_object(pre_req_params, order, wx_pay_resp_dict):
charge = models.Charge()
charge.order = order
charge.ch_id = wx_pay_resp_dict['prepay_id']
charge.created = timezone.now()
charge.app = pre_req_params['appid']
charge.channel = models.Charge.WX_PUB_MALA
charge.order_no = order.order_id
charge.client_ip = pre_req_params['spbill_create_ip']
charge.amount = order.to_pay
charge.currency = 'cny'
charge.subject = ''
charge.body = pre_req_params['body']
charge.extra = json.dumps({'openid': pre_req_params['openid']})
charge.transaction_no = ''
charge.failure_code = ''
charge.failure_msg = ''
charge.metadata = ''
charge.credential = ''
charge.description = ''
charge.save()
def wx_pay_order_query(wx_order_id=None, order_id=None):
"""
参考: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_2
"""
wx_url = 'https://api.mch.weixin.qq.com/pay/orderquery'
if not wx_order_id and not order_id:
        return {'ok': False, 'msg': 'No order ID given!', 'code': -4}
params = {}
params['appid'] = settings.WEIXIN_APPID
params['mch_id'] = settings.WEIXIN_MERCHANT_ID
    # Exactly one of [WeChat transaction ID, merchant's internal order ID]; the WeChat transaction ID takes precedence
    if wx_order_id:
        params['transaction_id'] = wx_order_id  # WeChat's transaction ID
    if order_id:
        params['out_trade_no'] = order_id  # merchant's internal order ID
    params['nonce_str'] = make_nonce_str()
    # Signature
params['sign'] = wx_sign_for_pay(params)
logger.debug(params)
req_xml_str = wx_dict2xml(params)
resp = requests.post(wx_url, data=req_xml_str.encode('utf-8'))
if resp.status_code == 200:
resp_dict = wx_xml2dict(resp.content.decode('utf-8'))
logger.debug(resp_dict)
return_code = resp_dict['return_code']
if return_code != WX_SUCCESS:
msg = resp_dict['return_msg']
logger.error(params)
logger.error(_WX_PAY_QUERY_ORDER_LOG_FMT.format(code=return_code, msg=msg))
return {'ok': False, 'msg': msg, 'code': 1}
given_resp_sign = resp_dict.pop('sign', None)
calculated_resp_sign = wx_sign_for_pay(resp_dict)
logger.debug('check wx_pay_query sign: '+str(given_resp_sign==calculated_resp_sign))
        if given_resp_sign != calculated_resp_sign:
            return {'ok': False, 'msg': 'Signature verification failed'}
result_code = resp_dict['result_code']
if result_code != WX_SUCCESS:
msg = resp_dict['err_code_des']
logger.error(params)
logger.error(_WX_PAY_QUERY_ORDER_LOG_FMT.format(code=resp_dict['err_code'], msg=msg))
return {'ok': False, 'msg': msg, 'code': 1}
# trade_state = resp_dict['trade_state']
# print(trade_state)
"""
SUCCESS—支付成功
REFUND—转入退款
NOTPAY—未支付
CLOSED—已关闭
REVOKED—已撤销(刷卡支付)
USERPAYING--用户支付中
PAYERROR--支付失败(其他原因,如银行返回失败)
"""
logger.info(_WX_PAY_QUERY_ORDER_LOG_FMT.format(code=return_code, msg=''))
transaction_id = resp_dict['transaction_id']
out_trade_no = resp_dict['out_trade_no']
_set_charge_transaction_no(out_trade_no, transaction_id)
return {'ok': True, 'msg': '', 'code': 0, 'data': resp_dict}
else:
        return {'ok': False, 'msg': 'Network request failed!', 'code': -1}
def _set_charge_transaction_no(order_no, transaction_id):
charge = models.Charge.objects.get(order__order_id=order_no)
charge.transaction_no = transaction_id
charge.save()
def resolve_wx_pay_notify(request):
req_dict = wx_xml2dict(request.body.decode('utf-8'))
logger.debug(req_dict)
return_code = req_dict['return_code']
if return_code != WX_SUCCESS:
msg = req_dict['return_msg']
logger.error(_WX_PAY_RESULT_NOTIFY_LOG_FMT.format(code=return_code, msg=msg))
return {'ok': False, 'msg': msg, 'code': 1}
given_resp_sign = req_dict.pop('sign', None)
calculated_resp_sign = wx_sign_for_pay(req_dict)
logger.debug('check wx_pay_notify sign: '+str(given_resp_sign==calculated_resp_sign))
    if given_resp_sign != calculated_resp_sign:
        return {'ok': False, 'msg': 'Signature verification failed'}
result_code = req_dict['result_code']
if result_code != WX_SUCCESS:
msg = req_dict['err_code_des']
logger.error(_WX_PAY_RESULT_NOTIFY_LOG_FMT.format(code=req_dict['err_code'], msg=msg))
return {'ok': False, 'msg': msg, 'code': 1}
logger.info(_WX_PAY_RESULT_NOTIFY_LOG_FMT.format(code=return_code, msg=''))
# openid = req_dict['openid']
transaction_id = req_dict['transaction_id']
out_trade_no = req_dict['out_trade_no']
_set_charge_transaction_no(out_trade_no, transaction_id)
return {'ok': True, 'msg': '', 'code': 0, 'data': req_dict}
def wx_send_tpl_msg(token, tpl_id, openid, data, detail_url=''):
wx_url = WX_TPL_MSG_URL.format(token=token)
ct = {
'access_token': token,
'touser': openid,
'template_id': tpl_id,
'url': detail_url,
'topcolor': "#FF0000",
"data": data
}
resp = requests.post(wx_url, data=json.dumps(ct))
if resp.status_code == 200:
ret_json = json.loads(resp.content.decode('utf-8'))
else:
ret_json = {'ok': False}
logger.debug("wx_send_tpl_msg:")
logger.debug(ret_json)
return ret_json
|
{
"content_hash": "a2377dfcfae3794e00bc5919ba64cad6",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 133,
"avg_line_length": 38.877742946708466,
"alnum_prop": 0.6073213997742299,
"repo_name": "malaonline/Server",
"id": "505758ba032fb34c74cc515287a07d0ff1e01e3e",
"size": "13020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/wechat/wxapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236251"
},
{
"name": "HTML",
"bytes": "532032"
},
{
"name": "JavaScript",
"bytes": "580515"
},
{
"name": "Python",
"bytes": "987542"
},
{
"name": "Shell",
"bytes": "1881"
}
],
"symlink_target": ""
}
|
import unittest
from mock import Mock, patch
from airflow.models import Pool
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING, PoolSlotsAvailableDep
from airflow.utils.db import create_session
from tests.test_utils import db
class TestPoolSlotsAvailableDep(unittest.TestCase):
def setUp(self):
db.clear_db_pools()
with create_session() as session:
test_pool = Pool(pool='test_pool')
session.add(test_pool)
session.commit()
def tearDown(self):
db.clear_db_pools()
@patch('airflow.models.Pool.open_slots', return_value=0)
# pylint: disable=unused-argument
def test_pooled_task_reached_concurrency(self, mock_open_slots):
ti = Mock(pool='test_pool')
self.assertFalse(PoolSlotsAvailableDep().is_met(ti=ti))
@patch('airflow.models.Pool.open_slots', return_value=1)
# pylint: disable=unused-argument
def test_pooled_task_pass(self, mock_open_slots):
ti = Mock(pool='test_pool')
self.assertTrue(PoolSlotsAvailableDep().is_met(ti=ti))
@patch('airflow.models.Pool.open_slots', return_value=0)
# pylint: disable=unused-argument
def test_running_pooled_task_pass(self, mock_open_slots):
for state in STATES_TO_COUNT_AS_RUNNING:
ti = Mock(pool='test_pool', state=state)
self.assertTrue(PoolSlotsAvailableDep().is_met(ti=ti))
def test_task_with_nonexistent_pool(self):
ti = Mock(pool='nonexistent_pool')
self.assertFalse(PoolSlotsAvailableDep().is_met(ti=ti))
|
{
"content_hash": "088be3c949947111042da26303561bea",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 107,
"avg_line_length": 36.74418604651163,
"alnum_prop": 0.6822784810126582,
"repo_name": "Fokko/incubator-airflow",
"id": "ff8d1502d90dedef4b6877d12b0dc2144042d8d9",
"size": "2391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ti_deps/deps/test_pool_slots_available_dep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "145596"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "8787104"
},
{
"name": "Shell",
"bytes": "187296"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
"""
As you might guess, the utils module contains some useful tools.
"""
from __future__ import unicode_literals, print_function
from __future__ import absolute_import
import os, codecs, md5
import logging
logger = logging.getLogger(__name__)
def smartwrite(data, target):
"""A smart writer.
Not going to write if data is equal to target's data. Create folder
if target's path is not existed. """
if not os.path.exists(target):
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
open(target, 'wb').write(data)
        logger.info('Written: ' + target)
else:
oldmd5 = md5.new(open(target, 'rb').read()).hexdigest()
newmd5 = md5.new(data).hexdigest()
if oldmd5 != newmd5:
            open(target, 'wb').write(data)
            logger.info('Written: ' + target)
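# Usage sketch (editorial addition): only the first call writes; the second
# is skipped because the MD5 digests of the old and new content match.
#     smartwrite('hello', 'build/out.txt')
#     smartwrite('hello', 'build/out.txt')  # no-op: content unchanged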
|
{
"content_hash": "cfa9fcf72c142ad5375f59abfcec87d5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 72,
"avg_line_length": 30.37037037037037,
"alnum_prop": 0.6195121951219512,
"repo_name": "lingyunyumo/PyLinden",
"id": "86335f1fb19652ab35c037bae92f43567b1e6331",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylinden/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83582"
},
{
"name": "JavaScript",
"bytes": "4931"
},
{
"name": "Python",
"bytes": "2204643"
}
],
"symlink_target": ""
}
|
import defcon
from fontParts.base import BaseGuideline
from fontParts.fontshell.base import RBaseObject
class RGuideline(RBaseObject, BaseGuideline):
wrapClass = defcon.Guideline
def _init(self, wrap=None):
if wrap is None:
wrap = self.wrapClass()
wrap.x = 0
wrap.y = 0
wrap.angle = 0
super(RGuideline, self)._init(wrap=wrap)
# --------
# Position
# --------
# x
def _get_x(self):
return self.naked().x
def _set_x(self, value):
self.naked().x = value
# y
def _get_y(self):
return self.naked().y
def _set_y(self, value):
self.naked().y = value
# angle
def _get_angle(self):
return self.naked().angle
def _set_angle(self, value):
self.naked().angle = value
# --------------
# Identification
# --------------
# identifier
def _get_identifier(self):
guideline = self.naked()
return guideline.identifier
def _getIdentifier(self):
guideline = self.naked()
return guideline.generateIdentifier()
def _setIdentifier(self, value):
self.naked().identifier = value
# name
def _get_name(self):
return self.naked().name
def _set_name(self, value):
self.naked().name = value
# color
def _get_color(self):
value = self.naked().color
if value is not None:
value = tuple(value)
return value
def _set_color(self, value):
self.naked().color = value
|
{
"content_hash": "35c3023762b963e8c282f21e39ff4d1f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 48,
"avg_line_length": 19.675,
"alnum_prop": 0.5470139771283354,
"repo_name": "robofab-developers/fontParts",
"id": "03f3e46de2c111dff8d23f21d216bac43ebc31bc",
"size": "1574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/fontParts/fontshell/guideline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "913380"
},
{
"name": "Shell",
"bytes": "1837"
}
],
"symlink_target": ""
}
|
from dock.core import incoming, outgoing
|
{
"content_hash": "8e144184dcd8d53e47ec7f4520f52b32",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 40,
"avg_line_length": 41,
"alnum_prop": 0.8292682926829268,
"repo_name": "ravidziv/dock",
"id": "bccce1611c4c39efdecd62219bbd4423e9ace2fb",
"size": "41",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dock/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
"""
Backtracking TL synthesis implementation for gridworlds.
Scott C. Livingston,
2011, 2012.
"""
from automaton import BTAutomaton, BTAutomatonNode
from gridworld import *
import itertools
import copy
import numpy as np
import tulip.gr1cint
import tulip.jtlvint
from tulip.spec import GRSpec
def create_nominal(W, env_init_list, soln_str, restrict_radius=1,
var_prefix="Y", env_prefix="X"):
"""Create nominal automaton.
    Generate nominal controller assuming environment can move, but not
    anywhere that interferes with the nominal path.
    The nominal path should be a sequence of coordinates, beginning with an
    initial position and continuing through loop closure. The point (other
    than the end of the path) to which the path end should be attached has
    its line terminated with "*". e.g.,
0 0
0 1
0 2
0 3 *
1 3
2 3
1 3
0 3
soln_str should contain the above path data. N.B., end-of-line
delimiter should be '\n'.
Return instance of btsynth.BTAutomaton
"""
# Parse nominal path string
nom_path = []
loop_marker = None
line_counter = -1
for line in soln_str.split("\n"):
line_counter += 1
parts = line.split()
if len(parts) < 2:
continue
if len(parts) > 2:
loop_marker = len(nom_path)
try:
nom_path.append((int(parts[0]), int(parts[1])))
except ValueError:
print "ERROR: malformed line in nominal path data, at line "+str(line_counter)
print line
exit(-1) # Be aggressive; force quit
if loop_marker is None:
print "ERROR: no loop closure marker found in nominal path data."
exit(-1) # Be aggressive; force quit
# Build list of system variables
pos_indices = [k for k in itertools.product(range(W.shape[0]), range(W.shape[1]))]
sys_vars = []
for k in pos_indices:
sys_vars.append(var_prefix+"_"+str(k[0])+"_"+str(k[1]))
sys_vars_nowhere = dict([(var, 0) for var in sys_vars])
# Build node without environment
aut = BTAutomaton()
last_id = 0
for step in nom_path:
node = BTAutomatonNode(id=last_id,
state=sys_vars_nowhere.copy(),
transition=[last_id+1])
node.state[var_prefix+"_"+str(step[0])+"_"+str(step[1])] = 1
aut.states.append(node)
last_id += 1
aut.states[-1].transition = [loop_marker]
# Augment for all environment variables
for env_ind in range(len(env_init_list)):
env_center = env_init_list[env_ind]
env_vars = []
nowhere_present = False
for i in range(env_center[0]-restrict_radius, env_center[0]+restrict_radius+1):
for j in range(env_center[1]-restrict_radius, env_center[1]+restrict_radius+1):
if i < 0 or j < 0 or i > W.shape[0]-1 or j > W.shape[1]-1:
nowhere_present = True
else:
env_vars.append(env_prefix+"_"+str(env_ind)+"_"+str(i)+"_"+str(j))
if nowhere_present:
for node in aut.states:
node.state[env_prefix+"_"+str(env_ind)+"_n_n"] = 1
env_vars.append(env_prefix+"_"+str(env_ind)+"_n_n")
aut.fleshOutGridState(nominal_vars=env_vars,
special_var=env_prefix+"_"+str(env_ind)+"_n_n")
else:
for node in aut.states:
node.state[env_vars[0]] = 1
aut.fleshOutGridState(nominal_vars=env_vars,
special_var=env_vars[0])
return aut
def gen_navobs_soln(init_list, goal_list, W, num_obs,
env_init_list, env_goal_list=None,
goals_disjunct=None,
restrict_radius=1,
var_prefix="Y", env_prefix="X",
only_realizability=False):
"""Generate solution as in gen_dsoln but now with dynamic obstacles.
Use gr1c (as interfaced through TuLiP) for synthesis.
This is a limited extension to the problem considered in
gen_dsoln. Here we introduce a finite number (num_obs) of
obstacles that navigate in restricted regions of the map W, thus
requiring a reactive controller to safely avoid them while
visiting the goal locations infinitely often.
env_init_list is the center position for each (env-controlled)
obstacle. Thus it must be that len(env_init_list) = num_obs
If env_goal_list is None (default), then regard the obstacle
initial positions as their goal positions.
N.B., we do not verify that env_goals are within restrict_radius
of initial env obstacle positions.
restrict_radius determines the domains of obstacles; cf. notes in
function LTL_world.
Return instance of btsynth.BTAutomaton on success;
None if not realizable, or an error occurs.
If only_realizability is True, then do *not* perform
synthesis. Instead, only check realizability of the specification,
and return True if realizable, False if not.
"""
# Argument error checking
if (len(init_list) == 0) or (num_obs < 0):
return None
# Handle default env obstacle goal case
if env_goal_list is None:
env_goal_list = env_init_list
# Handle degenerate case of no obstacles (thus, deterministic problem).
if num_obs < 1:
return gen_dsoln(init_list=init_list, goal_list=goal_list, W=W,
goals_disjunct=goals_disjunct,
var_prefix=var_prefix,
only_realizability=only_realizability)
########################################
# Environment prep
obs_bounds = []
env_trans = []
for k in range(num_obs):
center_loc = env_goal_list[k]
row_low = center_loc[0]-restrict_radius
nowhere = [False, # corresponds to row_low
False, # row_high
False, # col_low
False] # col_high
row_low = center_loc[0]-restrict_radius
if row_low < 0:
row_low = 0
nowhere[0] = True
row_high = center_loc[0]+restrict_radius
if row_high > W.shape[0]-1:
row_high = W.shape[0]-1
nowhere[1] = True
col_low = center_loc[1]-restrict_radius
if col_low < 0:
col_low = 0
nowhere[2] = True
col_high = center_loc[1]+restrict_radius
if col_high > W.shape[1]-1:
col_high = W.shape[1]-1
nowhere[3] = True
obs_bounds.append((row_low, row_high, col_low, col_high, nowhere))
env_trans.extend(LTL_world(W, var_prefix=env_prefix+"_"+str(k),
center_loc=center_loc,
restrict_radius=restrict_radius))
env_vars = []
for k in range(num_obs):
if (obs_bounds[k][4][0] or obs_bounds[k][4][1]
or obs_bounds[k][4][2] or obs_bounds[k][4][3]):
env_vars.append(env_prefix+"_"+str(k)+"_n_n")
for i in range(obs_bounds[k][0], obs_bounds[k][1]+1):
for j in range(obs_bounds[k][2], obs_bounds[k][3]+1):
env_vars.append(env_prefix+"_"+str(k)+"_"+str(i)+"_"+str(j))
# Environment progress: always eventually obstacle returns to
# initial position.
env_goal = []
for k in range(num_obs):
center_loc = env_goal_list[k]
if (center_loc[0] >= 0 and center_loc[0] <= W.shape[0]-1
and center_loc[1] >= 0 and center_loc[1] <= W.shape[1]-1):
env_goal.append(env_prefix+"_"+str(k)+"_"+str(center_loc[0])+"_"+str(center_loc[1]))
else:
env_goal.append(env_prefix+"_"+str(k)+"_n_n")
env_init = ""
if (env_init_list is not None) and len(env_init_list) > 0:
for obs_ind in range(num_obs):
loc = env_init_list[obs_ind]
if len(env_init) > 0:
env_init += " | "
            # Use this obstacle's own restricted-region bounds.
            if (loc[0] < obs_bounds[obs_ind][0] or loc[0] > obs_bounds[obs_ind][1]
                or loc[1] < obs_bounds[obs_ind][2] or loc[1] > obs_bounds[obs_ind][3]):
env_init += "(" + env_prefix+"_"+str(obs_ind)+"_n_n"
env_init_mutex = " & ".join(["!"+ovar for ovar in env_vars if ovar.startswith(env_prefix+"_"+str(obs_ind)) and ovar != env_prefix+"_"+str(obs_ind)+"_n_n"])
if len(env_init_mutex) > 0:
env_init += " & " + env_init_mutex
env_init += ")"
else:
env_init += "(" + env_prefix+"_"+str(obs_ind)+"_"+str(loc[0])+"_"+str(loc[1])
env_init_mutex = " & ".join(["!"+ovar for ovar in env_vars if ovar.startswith(env_prefix+"_"+str(obs_ind)) and ovar != env_prefix+"_"+str(obs_ind)+"_"+str(loc[0])+"_"+str(loc[1])])
if len(env_init_mutex) > 0:
env_init += " & " + env_init_mutex
env_init += ")"
########################################
# Sys prep
sys_vars = []
for i in range(W.shape[0]):
for j in range(W.shape[1]):
sys_vars.append(var_prefix+"_"+str(i)+"_"+str(j))
sys_trans = LTL_world(W, var_prefix=var_prefix)
init_str = ""
for loc in init_list:
if len(init_str) > 0:
init_str += " | "
init_str += "(" + var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
init_str_mutex = " & ".join(["!"+ovar for ovar in sys_vars if ovar != var_prefix+"_"+str(loc[0])+"_"+str(loc[1])])
if len(init_str_mutex) > 0:
init_str += " & " + init_str_mutex
init_str += ")"
sys_goal = []
for loc in goal_list:
sys_goal.append(var_prefix+"_"+str(loc[0])+"_"+str(loc[1]))
if (goals_disjunct is not None) and len(goals_disjunct) > 0:
goal_dstr = ""
for loc in goals_disjunct:
if len(goal_dstr) == 0:
goal_dstr += var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
else:
goal_dstr += " | "+var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
sys_goal.append(goal_dstr)
########################################
# Interaction: avoid collisions
coll_str = ""
for k in range(num_obs):
for i in range(obs_bounds[k][0], obs_bounds[k][1]+1):
for j in range(obs_bounds[k][2], obs_bounds[k][3]+1):
if len(coll_str) > 0:
coll_str += " & "
coll_str += "!(" + var_prefix+"_"+str(i)+"_"+str(j)+"'"
coll_str += " & " + env_prefix+"_"+str(k)+"_"+str(i)+"_"+str(j)+"'"
coll_str += ")"
if len(coll_str) > 0:
sys_trans.append(coll_str)
spec = GRSpec(sys_vars=sys_vars, sys_init=init_str,
sys_safety=sys_trans, sys_prog=sys_goal,
env_vars=env_vars, env_init=env_init,
env_safety=env_trans, env_prog=env_goal)
if only_realizability:
return tulip.gr1cint.check_realizable(spec, verbose=1)
aut = tulip.gr1cint.synthesize(spec, verbose=1)
if aut is not None:
return BTAutomaton(tulip_aut=aut)
else:
return None # Attempt at synthesis failed
def gen_navobs_soln_JTLV(init_list, goal_list, W, num_obs,
env_init_list, env_goal_list=None,
goals_disjunct=None,
restrict_radius=1,
var_prefix="Y", env_prefix="X",
fname_prefix="tempsyn"):
"""Generate solution as in gen_dsoln but now with dynamic obstacles.
Use JTLV (as interfaced through TuLiP) for synthesis.
This is a limited extension to the problem considered in
gen_dsoln. Here we introduce a finite number (num_obs) of
obstacles that navigate in restricted regions of the map W, thus
requiring a reactive controller to safely avoid them while
visiting the goal locations infinitely often.
    env_init_list gives the center position for each (env-controlled)
    obstacle; thus it must be that len(env_init_list) == num_obs.
If env_goal_list is None (default), then regard the obstacle
initial positions as their goal positions.
N.B., we do not verify that env_goals are within restrict_radius
of initial env obstacle positions.
restrict_radius determines the domains of obstacles; cf. notes in
function LTL_world.
Return instance of btsynth.BTAutomaton on success;
None if not realizable, or an error occurs.
"""
# Argument error checking
if (len(init_list) == 0) or (num_obs < 0):
return None
# Handle default env obstacle goal case
if env_goal_list is None:
env_goal_list = env_init_list
# Handle degenerate case of no obstacles (thus, deterministic problem).
if num_obs < 1:
return gen_dsoln_JTLV(init_list=init_list, goal_list=goal_list, W=W,
goals_disjunct=goals_disjunct,
var_prefix=var_prefix, fname_prefix=fname_prefix)
########################################
# Environment prep
obs_bounds = []
env_str = []
for k in range(num_obs):
        center_loc = env_goal_list[k]
        nowhere = [False, # corresponds to row_low
                   False, # row_high
                   False, # col_low
                   False] # col_high
        row_low = center_loc[0]-restrict_radius
if row_low < 0:
row_low = 0
nowhere[0] = True
row_high = center_loc[0]+restrict_radius
if row_high > W.shape[0]-1:
row_high = W.shape[0]-1
nowhere[1] = True
col_low = center_loc[1]-restrict_radius
if col_low < 0:
col_low = 0
nowhere[2] = True
col_high = center_loc[1]+restrict_radius
if col_high > W.shape[1]-1:
col_high = W.shape[1]-1
nowhere[3] = True
obs_bounds.append((row_low, row_high, col_low, col_high, nowhere))
env_str.append(LTL_world_JTLV(W, var_prefix="e."+env_prefix+"_"+str(k),
center_loc=center_loc,
restrict_radius=restrict_radius))
# Environment progress: always eventually obstacle returns to
# initial position.
env_goal_str = []
for k in range(num_obs):
center_loc = env_goal_list[k]
if (center_loc[0] >= 0 and center_loc[0] <= W.shape[0]-1
and center_loc[1] >= 0 and center_loc[1] <= W.shape[1]-1):
env_goal_str.append("[]<>(e."+env_prefix+"_"+str(k)+"_"+str(center_loc[0])+"_"+str(center_loc[1])+")")
else:
env_goal_str.append("[]<>(e."+env_prefix+"_"+str(k)+"_n_n)")
if len(env_goal_str) == 0:
env_goal_str = ""
else:
env_goal_str = " & ".join(env_goal_str) + " &\n"
env_init_str = ""
if (env_init_list is not None) and len(env_init_list) > 0:
for obs_ind in range(num_obs):
loc = env_init_list[obs_ind]
if len(env_init_str) > 0:
env_init_str += " | "
            # Use this obstacle's own restricted-region bounds.
            if (loc[0] < obs_bounds[obs_ind][0] or loc[0] > obs_bounds[obs_ind][1]
                or loc[1] < obs_bounds[obs_ind][2] or loc[1] > obs_bounds[obs_ind][3]):
env_init_str += "(" + "e."+env_prefix+"_"+str(obs_ind)+"_n_n)"
else:
env_init_str += "(" + "e."+env_prefix+"_"+str(obs_ind)+"_"+str(loc[0])+"_"+str(loc[1]) + ")"
########################################
# Sys prep
safety_str = LTL_world_JTLV(W, var_prefix="s."+var_prefix)
init_str = ""
for loc in init_list:
if len(init_str) > 0:
init_str += " | "
init_str += "(" + "s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1]) + ")"
goal_str = ""
for loc in goal_list:
if len(goal_str) > 0:
goal_str += " & "
goal_str += "[]<>(" + "s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1]) + ")"
if (goals_disjunct is not None) and len(goals_disjunct) > 0:
goal_dstr = ""
for loc in goals_disjunct:
if len(goal_dstr) == 0:
if len(goal_str) == 0:
goal_dstr += "[]<>( s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
else:
goal_dstr += " & []<>( s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
goal_dstr += " | s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
goal_dstr += " )"
goal_str += goal_dstr
########################################
# Interaction: avoid collisions
coll_str = ""
for k in range(num_obs):
for i in range(obs_bounds[k][0], obs_bounds[k][1]+1):
for j in range(obs_bounds[k][2], obs_bounds[k][3]+1):
if len(coll_str) == 0:
coll_str += "[]( "
else:
coll_str += " & "
coll_str += "!(" + "s."+var_prefix+"_"+str(i)+"_"+str(j)
coll_str += " & e."+env_prefix+"_"+str(k)+"_"+str(i)+"_"+str(j)
coll_str += ")"
if len(coll_str) > 0:
coll_str += ")"
########################################
# Create SMV file
with open(fname_prefix+".smv", "w") as f:
# Some parts of this code are copied from tulip/rhtlp.py
f.write("MODULE main \n")
f.write("\tVAR\n")
f.write("\t\te : env();\n")
f.write("\t\ts : sys();\n\n")
f.write("MODULE sys \n")
f.write("\tVAR\n")
for i in range(W.shape[0]):
for j in range(W.shape[1]):
f.write("\t\t" + var_prefix+"_"+str(i)+"_"+str(j) + " : boolean;\n")
f.write("MODULE env \n")
f.write("\tVAR\n")
for k in range(num_obs):
if (obs_bounds[k][4][0] or obs_bounds[k][4][1]
or obs_bounds[k][4][2] or obs_bounds[k][4][3]):
f.write("\t\t" + env_prefix+"_"+str(k)+"_n_n" + " : boolean;\n")
for i in range(obs_bounds[k][0], obs_bounds[k][1]+1):
for j in range(obs_bounds[k][2], obs_bounds[k][3]+1):
f.write("\t\t" + env_prefix+"_"+str(k)+"_"+str(i)+"_"+str(j) + " : boolean;\n")
# Create SPC file
with open(fname_prefix+".spc", "w") as f:
f.write("LTLSPEC\n")
if len(env_init_str) > 0:
f.write("("+env_init_str+") & \n")
f.write(env_goal_str)
first_obs_flag = True
for k in range(num_obs):
if first_obs_flag:
f.write(env_str[k])
first_obs_flag = False
else:
f.write(" &\n" + env_str[k])
f.write("\n;\n\nLTLSPEC\n")
f.write("("+init_str+") & \n" + goal_str + " & \n" + safety_str)
if len(coll_str) > 0:
f.write(" & \n" + coll_str)
f.write("\n;")
# Try JTLV synthesis
realizable = tulip.jtlvint.solveGame(smv_file=fname_prefix+".smv",
spc_file=fname_prefix+".spc",
init_option=1, file_exist_option="r",
heap_size="-Xmx2048m")
if not realizable:
return None
else:
return BTAutomaton(fname_prefix+".aut")
def navobs_sim(init, aut, W_actual, num_obs, var_prefix="Y", env_prefix="X",
num_it=100):
"""Sister to dsim, but now for solutions from gen_navobs_soln.
If the world is fully known a priori, then tulip.grsim suffices
for generating a simulation. navobs_sim simulates uncertainty and
aborts when given controller breaks.
Env locative variable prefixes are expected to take the form
prefix_N_R_C, where R, C are (row, column) as usual, and N is the
number of the obstacle.
num_obs could be determined from analysing the automaton... future work.
Same return values as in dsim, but also list of obstacle positions
at time of failure.
"""
# Handle initialization as a special case.
if W_actual[init[0]][init[1]] == 1:
import pdb; pdb.set_trace()
#return init, None
# Based on given env_prefix, extract all environment variable names.
env_vars = []
for k in aut.states[0].state:
if k.startswith(env_prefix):
env_vars.append(k)
# To avoid re-computing list comprehension at every step...
fake_env_state = dict([(k,0) for k in env_vars])
# Main simulation execution
i, j = init
loc_var = var_prefix+"_"+str(i)+"_"+str(j)
next_node = aut.findAllAutPartState({loc_var : 1})
if len(next_node) == 0:
import pdb; pdb.set_trace()
#return init, None
next_node = next_node[0]
history = [(i,j),] # Initialize trace
it_counter = 0
while it_counter < num_it:
it_counter += 1
this_node = next_node
next_node_id = aut.execNextAutState(next_node.id,
env_state=fake_env_state,
randNext=True)
next_node = aut.getAutState(next_node_id)
next_loc = extract_autcoord(next_node, var_prefix=var_prefix)
if next_loc is None:
raise ValueError("Given automaton is incomplete; reached deadend.")
if len(next_loc) > 1:
raise ValueError("Given automaton invalid; more than one locative prop true, despite mutual exclusion.")
next_loc = next_loc[0]
if W_actual[next_loc[0]][next_loc[1]] == 1:
obs_poses = []
for obs_ind in range(num_obs):
obs_poses.append(extract_autcoord(this_node,
var_prefix=env_prefix+"_"+str(obs_ind))[0])
return history, next_loc, obs_poses
history.append(next_loc)
obs_poses = []
for obs_ind in range(num_obs):
obs_poses.append(extract_autcoord(next_node,
var_prefix=env_prefix+"_"+str(obs_ind))[0])
return history, True, obs_poses
def gen_dsoln(init_list, goal_list, W, goals_disjunct=None,
var_prefix="Y", only_realizability=False):
"""Generate deterministic solution, given initial and goal states.
Use gr1c (as interfaced through TuLiP) for synthesis.
init_list is a list of pairs (row, col), signifying locations in
the world matrix W from which the system can be initialized.
Similarly for goal_list, but locations to visit infinitely often.
If goals_disjunct is not None, then it must be a list of (row,
col) pairs specifying goals to be combined disjunctively in a
single []<>... formula. The default (None) does nothing.
Return instance of btsynth.BTAutomaton on success;
None if not realizable, or an error occurs.
If only_realizability is True, then do *not* perform
synthesis. Instead, only check realizability of the specification,
and return True if realizable, False if not.
"""
if len(init_list) == 0:
return None
spec_trans = LTL_world(W, var_prefix=var_prefix)
sys_vars = []
for i in range(W.shape[0]):
for j in range(W.shape[1]):
sys_vars.append(var_prefix+"_"+str(i)+"_"+str(j))
init_str = ""
for loc in init_list:
if len(init_str) > 0:
init_str += " | "
init_str += "(" + var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
init_str_mutex = " & ".join(["!"+ovar for ovar in sys_vars if ovar != var_prefix+"_"+str(loc[0])+"_"+str(loc[1])])
if len(init_str_mutex) > 0:
init_str += " & " + init_str_mutex
init_str += ")"
spec_goal = []
for loc in goal_list:
spec_goal.append(var_prefix+"_"+str(loc[0])+"_"+str(loc[1]))
if (goals_disjunct is not None) and len(goals_disjunct) > 0:
goal_dstr = ""
for loc in goals_disjunct:
if len(goal_dstr) == 0:
goal_dstr += var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
else:
goal_dstr += " | "+var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
spec_goal.append(goal_dstr)
spec = GRSpec(sys_vars=sys_vars, sys_init=init_str,
sys_safety=spec_trans, sys_prog=spec_goal)
if only_realizability:
return tulip.gr1cint.check_realizable(spec, verbose=1)
aut = tulip.gr1cint.synthesize(spec, verbose=1)
if aut is not None:
return BTAutomaton(tulip_aut=aut)
else:
return None # Attempt at synthesis failed
def gen_dsoln_JTLV(init_list, goal_list, W, goals_disjunct=None,
var_prefix="Y", fname_prefix="tempsyn"):
"""Generate deterministic solution, given initial and goal states.
Use JTLV (as interfaced through TuLiP) for synthesis.
init_list is a list of pairs (row, col), signifying locations in
the world matrix W from which the system can be initialized.
Similarly for goal_list, but locations to visit infinitely often.
If goals_disjunct is not None, then it must be a list of (row,
col) pairs specifying goals to be combined disjunctively in a
single []<>... formula. The default (None) does nothing.
Return instance of btsynth.BTAutomaton on success;
None if not realizable, or an error occurs.
"""
if len(init_list) == 0:
return None
safety_str = LTL_world_JTLV(W, var_prefix="s."+var_prefix)
init_str = ""
for loc in init_list:
if len(init_str) > 0:
init_str += " | "
init_str += "(" + "s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1]) + ")"
goal_str = ""
for loc in goal_list:
if len(goal_str) > 0:
goal_str += " & "
goal_str += "[]<>(" + "s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1]) + ")"
if goals_disjunct is not None:
goal_dstr = ""
for loc in goals_disjunct:
if len(goal_dstr) == 0:
if len(goal_str) == 0:
goal_dstr += "[]<>( s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
else:
goal_dstr += " & []<>( s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
goal_dstr += " | s."+var_prefix+"_"+str(loc[0])+"_"+str(loc[1])
goal_dstr += " )"
goal_str += goal_dstr
# Create SMV file
with open(fname_prefix+".smv", "w") as f:
# Some parts of this code are copied from tulip/rhtlp.py
f.write("MODULE main \n")
f.write("\tVAR\n")
f.write("\t\te : env();\n")
f.write("\t\ts : sys();\n\n")
f.write("MODULE sys \n")
f.write("\tVAR\n")
for i in range(W.shape[0]):
for j in range(W.shape[1]):
f.write("\t\t" + var_prefix+"_"+str(i)+"_"+str(j) + " : boolean;\n")
f.write("MODULE env \n")
f.write("\tVAR\n")
# Create SPC file
with open(fname_prefix+".spc", "w") as f:
f.write("LTLSPEC\n;\n\nLTLSPEC\n")
f.write("("+init_str+") & \n" + goal_str + " & \n" + safety_str)
f.write("\n;")
# Try JTLV synthesis
realizable = tulip.jtlvint.solveGame(smv_file=fname_prefix+".smv",
spc_file=fname_prefix+".spc",
init_option=1, file_exist_option="r")
if not realizable:
return None
else:
return BTAutomaton(fname_prefix+".aut")
def dsim(init, aut, W_actual, var_prefix="Y", num_it=100):
"""Simulate application of controller (automaton) on actual world.
The thrust is that a solution was synthesized for a world W that
is not correct, and so you would like to execute that controller
on the true world W_actual and see when it breaks.
var_prefix is the name prefix used for locative variables,
    e.g. what was used when calling gen_dsoln (their default settings
should match).
Return history trace, ending at location (i.e. position of
vehicle) of failure, and intended next location (which implies an
action, since we are treating deterministic problem setting).
num_it is like a watchdog timer. Only num_it iterations will
execute before quitting (regardless of failure occurring).
    If the initial location is not even possible, then return the
    initial location and None (rather than an intended action).
    If the maximum number of iterations is reached, then the history is
    returned along with True (rather than an intended location).
"""
# Handle initialization as a special case.
if W_actual[init[0]][init[1]] == 1:
return init, None
# Special case of initial location possible and zero iterations run.
if num_it == 0:
return init, True
# Main simulation execution
i, j = init
loc_var = var_prefix+"_"+str(i)+"_"+str(j)
next_node = aut.findAllAutPartState({loc_var : 1})
if len(next_node) == 0:
return init, None
next_node = next_node[0]
history = [(i,j),] # Initialize trace
it_counter = 0
while it_counter < num_it:
it_counter += 1
next_node_id = aut.execNextAutState(next_node.id, env_state={})
next_node = aut.getAutState(next_node_id)
# N.B., execNextAutState raises an exception on error.
next_loc = extract_autcoord(next_node, var_prefix=var_prefix)
if next_loc is None:
raise ValueError("Given automaton is incomplete; reached deadend.")
if len(next_loc) > 1:
raise ValueError("Given automaton invalid; more than one locative prop true, despite mutual exclusion.")
next_loc = next_loc[0]
if W_actual[next_loc[0]][next_loc[1]] == 1:
return history, next_loc
history.append(next_loc)
return history, True
def cond_anynot(memory):
if len(memory) == 0:
raise ValueError("Cannot apply transition-conditional on empty memory.")
if 0 in memory.values():
return True
else:
return False
def cond_all(memory):
if len(memory) == 0:
raise ValueError("Cannot apply transition-conditional on empty memory.")
if 0 in memory.values():
return False
else:
return True
def rule_clearall(aut, memory, prev_node_id, node_id, this_input):
"""Clear all memory values, regardless."""
##print "DEBUG: automaton memory cleared by node "+str(node_id)
return dict([(k, 0) for k in memory.keys()])
def rule_setmatch(aut, memory, prev_node_id, node_id, this_input):
"""Set memory variables nonzero depending on edge-node labeling.
For each memory variable name that is also an environment or
system variable, if the present node (from which the rule was
invoked) is labeled (or its incoming edge is labeled) with this
variable being nonzero (i.e. True as a Boolean variable), then set
that memory to 1.
N.B., this rule acts one-way, i.e. it can only *set* memory
variables, not clear them.
"""
node = aut.getAutState(node_id)
if node == -1:
raise Exception("FATAL: rule called with invalid node ID.")
for k in memory.keys():
if node.state.has_key(k) and node.state[k] != 0:
##print "DEBUG: automaton memory \""+str(k)+"\" set by node "+str(node_id)
memory[k] = 1
return memory
def btsim_d(init, goal_list, aut, W_actual, num_steps=100, var_prefix="Y"):
"""Backtrack/patching algorithm, applied to deterministic problem.
This case is elementary and, being non-adversarial, may be better
addressed by other methods (e.g., graph search or D*).
Nonetheless it provides a decent base case for testing the idea.
num_steps is the number of simulation steps to run; this count is
across corrections. That is, the basic execution sequence is
1. sim to fault or num_steps reached;
2. if fault, run backtrack/patch algorithm to improve automaton;
goto step 1 (after updating aut)
3. else (total step count num_steps reached; simulation without
fault), quit.
(num_steps is not the same as num_it in the function dsim.)
Returns an updated (to reflect the corrected controller)
instance of btsynth.BTAutomaton and the known world map at
time of completion. Note that the "known world" may not match
the given W_actual, because some parts of the world may never be
visited (hence, uncertainty not corrected).
If patching is impossible or seems as hard as the original
(overall) problem, then return (None, None).
"""
step_count = 0
while True:
if step_count == num_steps:
return aut, None
# Loop invariants
if num_steps-step_count < 0:
raise ValueError("overstepped bt dsim loop.")
# Sim
history, intent = dsim(init, aut, W_actual, var_prefix=var_prefix,
num_it=num_steps-step_count)
if intent is True:
return aut, None
step_count += len(history)
# Detect special case
if intent in goal_list:
return None, None
# Patch (terminology follows that of the paper)
gamma = 1 # radius
delta = 1 # increment
iteration_count = 0
while True:
iteration_count += 1
radius = gamma + (iteration_count-1)*delta
nbhd_inclusion = [] # Use Manhattan distance as metric
for i in range(intent[0]-radius, intent[0]+radius+1):
for j in range(intent[1]-radius, intent[1]+radius+1):
if i >= 0 and i < W_actual.shape[0] \
and j >= 0 and j < W_actual.shape[1]:
nbhd_inclusion.append((i, j))
if len(nbhd_inclusion) == 0:
raise ValueError("gamma radius is too small; neighborhood is empty.")
patch_goal_list = []
for v in nbhd_inclusion:
if v in goal_list:
patch_goal_list.append(v)
fail_loc_var = var_prefix+"_"+str(intent[0])+"_"+str(intent[1])
# Set of nodes in M corresponding to abstract neighborhood.
Reg = aut.computeGridReg(nbhd=nbhd_inclusion, var_prefix=var_prefix)
S0 = aut.getAutInit()
Init = set([node.id for node in S0]) & set(Reg)
Entry = aut.findEntry(Reg)
Exit = aut.findExit(Reg)
W_patch, offset = subworld(W_actual, nbhd_inclusion)
# Shift coordinates to be w.r.t. W_patch
for ind in range(len(patch_goal_list)):
patch_goal_list[ind] = (patch_goal_list[ind][0]-offset[0],
patch_goal_list[ind][1]-offset[1])
patch_auts = []
fail_flag = False
for l in Init|set(Entry):
init_loc = extract_autcoord(aut.getAutState(l), var_prefix=var_prefix)[0]
init_loc = (init_loc[0]-offset[0], init_loc[1]-offset[1])
local_goals_IDs = list(aut.computeReach(l, Reg) & set(Exit))
local_goals = []
for goal_ID in local_goals_IDs:
local_goals.append(extract_autcoord(aut.getAutState(goal_ID),
var_prefix=var_prefix)[0])
local_goals[-1] = (local_goals[-1][0]-offset[0],
local_goals[-1][1]-offset[1])
aut_patch = gen_dsoln(init_list=[init_loc], goal_list=patch_goal_list,
W=W_patch, goals_disjunct=local_goals,
var_prefix=var_prefix)
if aut_patch is not None:
patch_auts.append((aut_patch, l, local_goals_IDs))
else:
fail_flag = True
break
if not fail_flag:
break
# Merge (in several steps)
# Set rule to clearing mem cells for nodes in the original M
for node in aut.states:
node.addNodeRule(rule_clearall)
# Adjust map coordinates from local (patch-centric) to global,
# and expand set of variables of the patch to include all
# those of the (original) global problem.
patch_id_maps = []
full_state = aut.states[0].state.keys() # Pick out full variable list
for aut_ind in range(len(patch_auts)):
Ml = patch_auts[aut_ind][0]
for node in Ml.states:
prev_keys = node.state.keys()
(i, j) = extract_autcoord(node, var_prefix=var_prefix)[0]
node.state = dict([(k, 0) for k in full_state])
node.state[var_prefix+"_"+str(i+offset[0])+"_"+str(j+offset[1])] = 1
node.addNodeRule(rule_setmatch)
patch_id_maps.append(aut.importChildAut(Ml))
# Undo offset of the part of sys goal list addressed in patch
for k in range(len(patch_goal_list)):
patch_goal_list[k] = (patch_goal_list[k][0]+offset[0],
patch_goal_list[k][1]+offset[1])
# Add memory for these goals
aut.memInit([var_prefix+"_"+str(i)+"_"+str(j) for (i, j) in patch_goal_list])
# Attach entry and exit points
for aut_ind in range(len(patch_auts)):
l = patch_auts[aut_ind][1]
Ml = patch_auts[aut_ind][0]
local_goals_IDs = patch_auts[aut_ind][2]
entry_node = aut.getAutState(l)
match_list = Ml.findAllAutState(entry_node.state)
if len(match_list) == 0:
raise Exception("FATAL")
# Shortcut, given we are only addressing deterministic
# (non-adversarial) problem in this example.
entry_node.transition = [patch_id_maps[aut_ind][match_list[0].transition[0]]]
match_flag = False
for local_goal_ID in local_goals_IDs:
goal_node = aut.getAutState(local_goal_ID)
match_list = Ml.findAllAutState(goal_node.state)
if len(match_list) > 0:
match_flag = True
for match_node in match_list:
if len(aut.getMem()) > 0:
for k in range(len(aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond)):
if aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond[k] is None:
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond[k] = cond_anynot
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond.extend([cond_all for k in goal_node.cond])
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition.extend(goal_node.transition)
else:
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond = [None for k in aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition]
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond.extend([None for k in goal_node.cond])
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition = goal_node.transition[:]
if not match_flag:
raise Exception("FATAL")
# Delete blocked nodes and dependent edges
kill_list = []
for ind in range(len(aut.states)):
if extract_autcoord(aut.states[ind], var_prefix=var_prefix)[0] == intent:
kill_list.append(aut.states[ind].id)
for kill_id in kill_list:
aut.removeNode(kill_id)
aut.packIDs()
# Pick-off invalid initial nodes
aut.removeFalseInits(S0)
aut.packIDs()
def btsim_navobs(init, goal_list, aut, W_actual,
env_init_list, restrict_radius=1,
num_obs=None,
num_steps=100,
var_prefix="Y", env_prefix="X", use_JTLV=False):
"""Sister to btsim_d, but now for solutions from gen_navobs_soln.
    If num_obs is None, set it to len(env_init_list); this is a
    temporary hack until the code is cleaned up.
If the global problem is recovered, then a warning is printed and
(None, None) is returned.
Cf. doc for navobs_sim and gen_navobs_soln.
"""
if num_obs is None:
num_obs = len(env_init_list)
# We do not (yet) allow env obstacle init/goals to differ by user choice
env_goal_list = env_init_list[:]
step_count = 0
while True:
if step_count == num_steps:
return aut, None
# Loop invariants
if num_steps-step_count < 0:
raise ValueError("overstepped bt sim_navobs loop.")
# Sim
history, intent, obs_poses = navobs_sim(init, aut, W_actual,
num_obs=num_obs,
var_prefix=var_prefix,
env_prefix=env_prefix,
num_it=num_steps-step_count)
if intent is True:
return aut, None
step_count += len(history)
# Detect special case
if intent in goal_list:
return None, None
# Patch (terminology follows that of the paper)
gamma = 1 # radius, increment
radius = 0
while True:
radius += gamma
print "r_inc = "+str(radius)
nbhd_inclusion = [] # Use Manhattan distance as metric
for i in range(intent[0]-radius, intent[0]+radius+1):
for j in range(intent[1]-radius, intent[1]+radius+1):
if i >= 0 and i < W_actual.shape[0] \
and j >= 0 and j < W_actual.shape[1]:
nbhd_inclusion.append((i, j))
if len(nbhd_inclusion) == 0:
raise ValueError("gamma radius is too small; neighborhood is empty.")
patch_goal_list = []
patch_env_goals = []
for v in nbhd_inclusion:
if v in goal_list:
patch_goal_list.append(v)
if v in env_goal_list:
patch_env_goals.append((env_goal_list.index(v), v))
fail_loc_var = var_prefix+"_"+str(intent[0])+"_"+str(intent[1])
# Re-sort env obstacle goals
patch_env_goal_list = env_init_list[:]
for env_g in patch_env_goals:
patch_env_goal_list[env_g[0]] = env_g[1]
# Set of nodes in M corresponding to abstract nbhd.
Reg = aut.computeGridReg(nbhd=nbhd_inclusion, var_prefix=var_prefix)
S0 = aut.getAutInit()
Init = set([node.id for node in S0]) & set(Reg)
Entry = list(aut.findEntry(Reg))
Exit = aut.findExit(Reg)
# Remove newly blocked possibilities for dynamic obstacle positions.
for env_i in range(len(env_init_list)):
env_i_prefix = env_prefix+"_"+str(env_i)
Init = set([ind for ind in Init if extract_autcoord(aut.states[ind], var_prefix=env_i_prefix)[0] != intent])
Entry = set([ind for ind in Entry if extract_autcoord(aut.states[ind], var_prefix=env_i_prefix)[0] != intent])
if len(Reg) == aut.size():
print "WARNING: arrived at global problem, i.e., S = Reg."
return None, None
W_patch, offset = subworld(W_actual, nbhd_inclusion)
# Shift coordinates to be w.r.t. W_patch
for ind in range(len(patch_goal_list)):
patch_goal_list[ind] = (patch_goal_list[ind][0]-offset[0],
patch_goal_list[ind][1]-offset[1])
for ind in range(len(patch_env_goal_list)):
patch_env_goal_list[ind] = (patch_env_goal_list[ind][0]-offset[0],
patch_env_goal_list[ind][1]-offset[1])
patch_auts = []
fail_flag = False
for l in Init|set(Entry):
init_loc = extract_autcoord(aut.getAutState(l), var_prefix=var_prefix)[0]
init_loc = (init_loc[0]-offset[0], init_loc[1]-offset[1])
local_env_init = env_init_list[:]
for obs in range(num_obs):
local_env_init[obs] = extract_autcoord(aut.getAutState(l),
var_prefix=env_prefix+"_"+str(obs))[0]
local_env_init[obs] = (local_env_init[obs][0]-offset[0],
local_env_init[obs][1]-offset[1])
if len(Exit) == 0:
# Special case where it suffices to remain local
# forever (all system goals in here, etc.).
local_goals_IDs = []
else:
local_goals_IDs = list(aut.computeReach(l, Reg) & set(Exit))
if (l in local_goals_IDs) and (len(local_goals_IDs) > 1):
del local_goals_IDs[local_goals_IDs.index(l)]
local_goals = []
for goal_ID in local_goals_IDs:
local_goals.append(extract_autcoord(aut.getAutState(goal_ID),
var_prefix=var_prefix)[0])
local_goals[-1] = (local_goals[-1][0]-offset[0],
local_goals[-1][1]-offset[1])
local_goals = list(set(local_goals)) # Remove redundancy
if use_JTLV:
aut_patch = gen_navobs_soln_JTLV(init_list=[init_loc], goal_list=patch_goal_list,
W=W_patch, num_obs=num_obs,
env_init_list=local_env_init,
env_goal_list=patch_env_goal_list,
restrict_radius=restrict_radius,
goals_disjunct=local_goals,
var_prefix=var_prefix,
env_prefix=env_prefix)
else:
aut_patch = gen_navobs_soln(init_list=[init_loc], goal_list=patch_goal_list,
W=W_patch, num_obs=num_obs,
env_init_list=local_env_init,
env_goal_list=patch_env_goal_list,
restrict_radius=restrict_radius,
goals_disjunct=local_goals,
var_prefix=var_prefix,
env_prefix=env_prefix)
if aut_patch is not None:
patch_auts.append((aut_patch, l, local_goals_IDs))
else:
fail_flag = True
break
if not fail_flag:
break
# Merge (in several steps)
for aut_ind in range(len(patch_auts)):
patch_auts[aut_ind][0].trimDeadStates()
# Set rule to clearing mem cells for nodes in the original M
for node in aut.states:
node.addNodeRule(rule_clearall)
# Adjust map coordinates from local (patch-centric) to global,
# and expand set of variables of the patch to include all
# those of the (original) global problem.
patch_id_maps = []
env_vars_list = []
env_nowhere_vars = []
for obs in range(num_obs):
# Pick out full list of env variables, for each obstacle
env_vars_list.append(prefix_filt(aut.states[0].state,
prefix=env_prefix+"_"+str(obs)))
env_vars_list[-1] = env_vars_list[-1].keys()
env_nowhere_vars.append(env_prefix+"_"+str(obs)+"_n_n")
# Pick out full list of sys variables
sys_vars = prefix_filt(aut.states[0].state, prefix=var_prefix)
for aut_ind in range(len(patch_auts)):
Ml = patch_auts[aut_ind][0]
for node in Ml.states:
temp_state = copy.copy(node.state)
node.state = {}
for (k,v) in temp_state.items():
ex_result = extract_coord(k)
if ((ex_result is None)
or (ex_result[1] == -1 and ex_result[2] == -1)):
# not spatially-dependent variable; ignore
node.state[k] = v
else:
node.state[ex_result[0]+"_"+str(ex_result[1]+offset[0])+"_"+str(ex_result[2]+offset[1])] = v
for k in sys_vars.keys():
if not node.state.has_key(k):
node.state[k] = 0
for obs in range(num_obs):
if env_nowhere_vars[obs] in Ml.states[0].state.keys():
Ml.fleshOutGridState(env_vars_list[obs],
special_var=env_nowhere_vars[obs])
for node in Ml.states:
node.addNodeRule(rule_setmatch)
patch_id_maps.append(aut.importChildAut(Ml,
tags={"color": (np.random.randint(0, 256), np.random.randint(0, 256), np.random.randint(0, 256), 0.5),
"cluster_id": aut_ind}))
# Undo offset of the part of sys goal list addressed in patch
for k in range(len(patch_goal_list)):
patch_goal_list[k] = (patch_goal_list[k][0]+offset[0],
patch_goal_list[k][1]+offset[1])
# Add memory for these goals
aut.memInit([var_prefix+"_"+str(i)+"_"+str(j) for (i, j) in patch_goal_list])
# Attach entry and exit points
for aut_ind in range(len(patch_auts)):
l = patch_auts[aut_ind][1]
Ml = patch_auts[aut_ind][0]
local_goals_IDs = patch_auts[aut_ind][2]
entry_InSet = set(aut.getAutInSet(l)) - set(Reg)
if len(entry_InSet) == 0:
S0 = set([S0_node for S0_node in S0 if S0_node.id != l])
S0 = S0|set([aut.getAutState(patch_id_maps[aut_ind][Ml_node.id]) for Ml_node in Ml.getAutInit()])
else:
match_list = Ml.findAllAutPartState(aut.getAutState(l).state)
assert len(match_list) != 0
for entry_prenode in entry_InSet:
entry_prenode.transition[entry_prenode.transition.index(l)] = patch_id_maps[aut_ind][match_list[0].id]
if len(local_goals_IDs) == 0:
# Special case where it suffices to remain local
# forever (all system goals in here, etc.).
match_flag = True
else:
match_flag = False
for local_goal_ID in local_goals_IDs:
goal_node = aut.getAutState(local_goal_ID)
sys_state = prefix_filt(goal_node.state, prefix=var_prefix)
# match_list = Ml.findAllAutPartState(sys_state)
match_list = Ml.findAllAutPartState(goal_node.state)
if len(match_list) > 0:
match_flag = True
for match_node in match_list:
if len(aut.getMem()) > 0:
for k in range(len(aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond)):
if aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond[k] is None:
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond[k] = cond_anynot
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond.extend([cond_all for k in goal_node.cond])
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition.extend(goal_node.transition)
if goal_node.id in goal_node.transition:
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition[aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition.index(goal_node.id)] = patch_id_maps[aut_ind][match_node.id]
else:
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond = [None for k in aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition]
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).cond.extend([None for k in goal_node.cond])
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition = goal_node.transition[:]
if goal_node.id in goal_node.transition:
aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition[aut.getAutState(patch_id_maps[aut_ind][match_node.id]).transition.index(goal_node.id)] = patch_id_maps[aut_ind][match_node.id]
assert match_flag
# Delete blocked nodes and dependent edges
kill_list = []
for ind in range(len(aut.states)):
if extract_autcoord(aut.states[ind], var_prefix=var_prefix)[0] == intent:
kill_list.append(aut.states[ind].id)
for kill_id in kill_list:
aut.removeNode(kill_id)
aut.packIDs()
# Clean up any dangling ends
last_size = -1
while last_size != aut.size():
last_size = aut.size()
aut.trimDeadStates()
# Pick-off invalid initial nodes, and other clean-up
aut.removeFalseInits(S0)
aut.packIDs()
aut.cleanDuplicateTrans()
def to_formula(aut_node):
"""Take the given automaton node and return formula equivalent to state.
NOTE: ...this function, or one like it, may be general enough to
place in the tulip.automaton module directly.
ASSUMES ALL VARIABLES APPEARING IN GIVEN NODE ARE BOOLEAN.
Returns string type.
"""
out_str = ""
for (k, v) in aut_node.state.items():
if len(out_str) > 0:
out_str += " & "
if v > 0:
out_str += "("
else:
out_str += "(!"
out_str += k+")"
return out_str
def prefix_filt(d, prefix):
"""return all items in dictionary d with key with given prefix."""
match_list = []
for k in d.keys():
if isinstance(k, str):
if k.startswith(prefix):
match_list.append(k)
return dict([(k, d[k]) for k in match_list])
|
{
"content_hash": "9ece7643675eeb277e4d1b359cb6fba4",
"timestamp": "",
"source": "github",
"line_count": 1292,
"max_line_length": 220,
"avg_line_length": 42.44969040247678,
"alnum_prop": 0.5342146047953323,
"repo_name": "slivingston/btsynth",
"id": "a19b021f8244988f9f1e2363b924492ab0a35cd8",
"size": "54867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "btsynth/btsynth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "136581"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
}
|
import os
from werkzeug.datastructures import FileStorage
from shuttl.Models.FileTree.Directory import Directory
from shuttl.Models.FileTree.FileObjects.Template import Template
from shuttl.Models.FileTree.Webpage import Webpage
from shuttl.tests import testbase
from shuttl import app
from shuttl.Models.Reseller import Reseller
from shuttl.Models.organization import Organization
from shuttl.Models.Website import Website
from shuttl.Models.FileTree.FileObjects.CssFile import CssFile
from shuttl.Models.FileTree.FileObjects.JsFile import JsFile
import unittest
class FileTagTestCase(testbase.BaseTest):
@unittest.skipIf(app.config["SHOULD_SKIP"], reason='passed locally')
def test_filetag(self):
self.organization = Organization.Create(name="test", reseller=self.reseller)
self.website = Website.Create(name="Thing", organization=self.organization)
testFilePath = os.path.join(app.config["BASE_DIR"], "shuttl/test_files/file_tag", "file_tag.html")
testCssPath = os.path.join(app.config["BASE_DIR"], "shuttl/test_files", "test.css")
testJsPath = os.path.join(app.config["BASE_DIR"], "shuttl/test_files", "test.js")
with open(testFilePath, 'rb') as fp:
file = FileStorage(fp)
dir = Directory.Create(name='stuff', website=self.website)
self.website.root.addChild(dir)
fileTemplate = Template.Create(parent=dir, file=file, name='testfile', website=self.website)
pass
with open(testCssPath, 'rb') as fp:
file2 = FileStorage(fp)
cssFile = CssFile.Create(parent=dir, file=file2, name="style.css", website=self.website)
with open(testJsPath, 'rb') as fp:
file3 = FileStorage(fp)
jsFile = JsFile.Create(parent=dir, file=file3, name="app.js", website=self.website)
testWebpage = Webpage.Create(name='test_webpage', template=fileTemplate, website=self.website)
res = testWebpage.buildContent(website=self.website)
expected = """
<link href="http://test.shuttl.com:5000/getStaticContent/6" rel="stylesheet" type="text/css"/>
<script src="http://test.shuttl.com:5000/getStaticContent/7" type="text/javascript"></script>
"""
self.assertEquals(self.removeWhiteSpace(res), self.removeWhiteSpace(expected))
result = self.app.get('http://test.shuttl.com:5000/getStaticContent/6')
self.assertEquals(result.status_code, 200)
self.assertEquals(('Content-Type', 'text/css; charset=utf-8'), result.headers[1])
self.assertEquals(self.removeWhiteSpace(result.data.decode()), self.removeWhiteSpace('p{\n text-align: center;\n}'))
result = self.app.get('http://test.shuttl.com:5000/getStaticContent/7')
self.assertEquals(result.status_code, 200)
self.assertEquals(('Content-Type', 'application/javascript'), result.headers[1])
self.assertEquals(result.data.decode(), 'console.log();')
result = self.app.get('http://test.shuttl.com:5000/getStaticContent/8')
self.assertEquals(result.status_code, 404)
pass
|
{
"content_hash": "a456a80d05ed992964fe7567a365a539",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 127,
"avg_line_length": 52.813559322033896,
"alnum_prop": 0.6976893453145058,
"repo_name": "shuttl-io/shuttl",
"id": "e6aa6923cac0bed757198c3ee991e37727446965",
"size": "3116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shuttl/tests/test_filetag.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44727"
},
{
"name": "Elixir",
"bytes": "34"
},
{
"name": "HTML",
"bytes": "27869"
},
{
"name": "JavaScript",
"bytes": "29584"
},
{
"name": "Python",
"bytes": "286624"
},
{
"name": "Shell",
"bytes": "1496"
},
{
"name": "Vue",
"bytes": "44133"
}
],
"symlink_target": ""
}
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
command = parser.add_subparsers(help='commands')
commandA = command.add_parser('A')
commandA.add_argument('-a','--aye', help='command attribute')
operations = commandA.add_subparsers(help='operations')
operationX = operations.add_parser('X')
operationX.add_argument('-x','--ex', help='operation parameter')
args = parser.parse_args()
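# Example invocation (hypothetical values):
#   python nestegg.py -v A -a foo X -x bar
# which yields args.verbose == True, args.aye == 'foo', args.ex == 'bar'.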
|
{
"content_hash": "2d02bd5d4bbf6f2864565e870d84497a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 64,
"avg_line_length": 22.75,
"alnum_prop": 0.7208791208791209,
"repo_name": "eddo888/Tools",
"id": "ddfb4cb74440d49a6743113765bd9850fcb613a6",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/nestegg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "208873"
},
{
"name": "Shell",
"bytes": "1444"
}
],
"symlink_target": ""
}
|
from robotide.lib.robot.errors import DataError
from robotide.lib.robot.utils import plural_or_not, seq2str
from robotide.lib.robot.variables import is_list_var
class ArgumentValidator(object):
def __init__(self, argspec):
""":type argspec: :py:class:`robot.running.arguments.ArgumentSpec`"""
self._argspec = argspec
def validate(self, positional, named, dryrun=False):
if dryrun and any(is_list_var(arg) for arg in positional):
return
named = set(name for name, value in named)
self._validate_no_multiple_values(positional, named, self._argspec)
self._validate_positional_limits(positional, named, self._argspec)
self._validate_no_mandatory_missing(positional, named, self._argspec)
self._validate_no_named_only_missing(named, self._argspec)
self._validate_no_extra_named(named, self._argspec)
def _validate_positional_limits(self, positional, named, spec):
count = len(positional) + self._named_positionals(named, spec)
if not spec.minargs <= count <= spec.maxargs:
self._raise_wrong_count(count, spec)
def _named_positionals(self, named, spec):
if not spec.supports_named:
return 0
return sum(1 for n in named if n in spec.positional)
def _raise_wrong_count(self, count, spec):
minend = plural_or_not(spec.minargs)
if spec.minargs == spec.maxargs:
expected = '%d argument%s' % (spec.minargs, minend)
elif not spec.varargs:
expected = '%d to %d arguments' % (spec.minargs, spec.maxargs)
else:
expected = 'at least %d argument%s' % (spec.minargs, minend)
if spec.kwargs or spec.kwonlyargs:
expected = expected.replace('argument', 'non-named argument')
raise DataError("%s '%s' expected %s, got %d."
% (spec.type, spec.name, expected, count))
def _validate_no_multiple_values(self, positional, named, spec):
if named and spec.supports_named:
for name in spec.positional[:len(positional)]:
if name in named:
raise DataError("%s '%s' got multiple values for argument "
"'%s'." % (spec.type, spec.name, name))
def _validate_no_mandatory_missing(self, positional, named, spec):
for name in spec.positional[len(positional):spec.minargs]:
if name not in named:
raise DataError("%s '%s' missing value for argument '%s'."
% (spec.type, spec.name, name))
def _validate_no_named_only_missing(self, named, spec):
defined = set(named) | set(spec.defaults)
missing = [arg for arg in spec.kwonlyargs if arg not in defined]
if missing:
raise DataError("%s '%s' missing named-only argument%s %s."
% (spec.type, spec.name, plural_or_not(missing),
seq2str(sorted(missing))))
def _validate_no_extra_named(self, named, spec):
if not spec.kwargs:
extra = set(named) - set(spec.positional) - set(spec.kwonlyargs)
if extra:
raise DataError("%s '%s' got unexpected named argument%s %s."
% (spec.type, spec.name, plural_or_not(extra),
seq2str(sorted(extra))))
|
{
"content_hash": "8ff3d57a8f2252166a6e57f33f6c91a6",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 47.388888888888886,
"alnum_prop": 0.5943728018757327,
"repo_name": "robotframework/RIDE",
"id": "7366bd0fa90c7f04fd5ff248a35d80b1122ee618",
"size": "4056",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robotide/lib/robot/running/arguments/argumentvalidator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31131"
},
{
"name": "HTML",
"bytes": "96342"
},
{
"name": "JavaScript",
"bytes": "42656"
},
{
"name": "Python",
"bytes": "3703410"
},
{
"name": "RobotFramework",
"bytes": "378004"
},
{
"name": "Shell",
"bytes": "1873"
}
],
"symlink_target": ""
}
|
import os
import unittest
from config import basedir
from app import app, db
from app.models import User, Post
from datetime import datetime, timedelta
class TestCase(unittest.TestCase):
def test_follow_posts(self):
        # make 3 users
u1 = User(nickname='alex', email='alex@a.com')
u2 = User(nickname='david', email='david@a.com')
u3 = User(nickname='lisa', email='lisa@a.com')
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
# make 3 posts
utcnow = datetime.utcnow()
p1 = Post(body="alex first post", author=u1, timestamp=utcnow + timedelta(seconds=1))
p2 = Post(body="hello from david", author=u2, timestamp=utcnow + timedelta(seconds=2))
p3 = Post(body="Hi!!!:)", author=u3, timestamp=utcnow + timedelta(seconds=3))
db.session.add(p1)
db.session.add(p2)
db.session.add(p3)
db.session.commit()
# friend them
u1.friend(u2) # alex - david
u1.friend(u3) # alex - lisa
u2.friend(u3) # david - lisa
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
# check the followed posts of each user
db.session.rollback()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "48df828d025c95b5389dc78526515232",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 94,
"avg_line_length": 31.162790697674417,
"alnum_prop": 0.5970149253731343,
"repo_name": "dysya92/monkeys",
"id": "e86ba4a0f93dd9f7bd6e37b6cdc27b30819f3199",
"size": "1359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6349"
},
{
"name": "CSS",
"bytes": "11392"
},
{
"name": "HTML",
"bytes": "19208"
},
{
"name": "JavaScript",
"bytes": "119006"
},
{
"name": "Python",
"bytes": "10574554"
},
{
"name": "Shell",
"bytes": "3687"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# maximum size of the lookup table
max_lookup_tab_size = 65537
# node name hashing function
def str_hash(name):
return abs(hash(name))
class maglev_hash:
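    """Sketch of Maglev consistent hashing.

    Each backend gets a preference list (a permutation of lookup-table
    slots) derived from a hash of the backend name (an offset and a skip);
    populate() then lets the backends take turns claiming their next
    preferred free slot until the table is full, which keeps the table
    nearly evenly split across backends and limits remapping when a node
    is added or removed.
    """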
backends_num = 13 # => N
lookuptab_size = 5 # => M
permutation = [] # => prefer list
lookuptab = [] # => lookup table
backend_list = [] # => backend list
def __init__(self, nodes, lookup_size):
self.backends_num = len(nodes)
self.lookuptab_size = lookup_size
self.backend_list = [ backend for backend in nodes ]
self.generatePopulation()
self.populate()
def add_node(self, new_node):
if new_node in self.backend_list:
print("node " + new_node + " is found in existing list")
return
self.backend_list.append(new_node)
self.backends_num = len(self.backend_list)
self.generatePopulation()
self.populate()
def del_node(self, node):
if node not in self.backend_list:
print("node ", node, " is not found in existing list")
return
self.backend_list.remove(node)
self.backends_num = len(self.backend_list)
self.generatePopulation()
self.populate()
    def get_name(self, node):
if (self.backends_num == 0):
print("empty backend list")
return None
key = str_hash(node)
return self.backend_list[ self.lookuptab[ key%self.lookuptab_size ] ]
    def generatePopulation(self):
        # Rebuild the preference lists from scratch so that repeated calls
        # (e.g. after add_node/del_node) do not accumulate stale rows.
        self.permutation = []
        if 0 == self.backends_num:
            return
for backend in self.backend_list:
offset = str_hash(backend) % self.lookuptab_size
skip = (str_hash(backend) % (self.lookuptab_size-1)) + 1
iRows = []
for j in range(self.lookuptab_size):
                # key point 1: the j-th preferred slot of this backend is (offset + j*skip) mod M
iRows.append( (offset + j * skip) % self.lookuptab_size )
self.permutation.append(iRows)
def populate(self):
if 0 == self.backends_num:
return
next_idx = [ 0 for i in range(self.backends_num) ]
entry = [ -1 for i in range(self.lookuptab_size) ]
filled_entry = 0
        # key point 2: backends take turns claiming their next preferred free slot
while True:
for idx_backend in range(self.backends_num):
c = self.permutation[idx_backend][ next_idx[idx_backend] ]
while entry[c] >= 0:
next_idx[idx_backend] += 1
c = self.permutation[idx_backend][next_idx[idx_backend]]
entry[c] = idx_backend
next_idx[idx_backend] += 1
filled_entry += 1
if filled_entry == self.lookuptab_size:
self.lookuptab = entry
return
def debug_lookuptab(self, indent):
print(indent + "debug lookup table content: ")
lookup = self.get_node_in_lookuptab()
print(indent*2+ str(lookup) )
# for i in self.lookuptab:
# print(indent*2 + str(i) + " <--> " + self.backend_list[ i ])
def get_node_in_lookuptab(self):
return [ self.backend_list[node] for node in self.lookuptab ]
def debug_print_maglev(self, indent):
print(indent + ("nodes number: %d" % self.backends_num))
for i in range(self.backends_num):
print(indent*2 + self.backend_list[i] + ": prefer list => " + str(self.permutation[i]) )
self.debug_lookuptab(indent)
def show_in_text_format(lookup_size, nodes_num):
# test case
indent = " "
nodes = []
for i in range(nodes_num):
nodes.append("backend-%d" % i)
test1 = maglev_hash(nodes, lookup_size)
print("init backend set info: ")
test1.debug_print_maglev(indent)
# add a new node
nodes.append("backend-%d" % nodes_num)
nodes_num += 1
test1.add_node( nodes[-1] )
print("====== add a new node ======")
test1.debug_print_maglev(indent)
# remove a existing node
test1.del_node(nodes[-1])
print("====== remove an existing node ======")
test1.debug_print_maglev(indent)
def show_in_char_format(lookup_size, init_nodes):
nodes = []
for i in range(init_nodes):
nodes.append("backend-%d" % i)
test1 = maglev_hash(nodes, lookup_size)
prev_lookup = test1.get_node_in_lookuptab()
    curr_lookup = None
    # add new nodes until the number of nodes equals the lookup table size
y_num_diff = [lookup_size]
x_num_nodes = [init_nodes]
while init_nodes < lookup_size:
test1.add_node( "backend-%d" % init_nodes )
        curr_lookup = test1.get_node_in_lookuptab()
i, n = 0, 0
while i < lookup_size:
            if prev_lookup[i] != curr_lookup[i]:
n += 1
i += 1
y_num_diff.append(n)
        prev_lookup = curr_lookup
init_nodes += 1
x_num_nodes.append(init_nodes)
# print x_num_nodes
# print y_num_diff
# show its figure
fig = plt.figure()
plt.bar(x_num_nodes, y_num_diff, 0.4, color="green")
plt.xlabel("number of nodes")
    plt.ylabel("number of remapped entries")
plt.title("Maglev Hashing")
plt.show()
# plt.savefig("maglev_hash_bar1.jpg")
if __name__ == '__main__':
show_in_text_format(13, 5)
show_in_char_format(13, 1)
|
{
"content_hash": "d85545e991fe8f7c0ed29f00b847b0d1",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 100,
"avg_line_length": 36.224489795918366,
"alnum_prop": 0.5703286384976526,
"repo_name": "feelkill/feelkill.github.io",
"id": "55cd656d06f44860edd035d57f9b8c5c1ca12afa",
"size": "5378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pieces_of_codes/maglev-hashing-imple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7878"
},
{
"name": "CSS",
"bytes": "21094"
},
{
"name": "HTML",
"bytes": "9868"
},
{
"name": "JavaScript",
"bytes": "40386"
},
{
"name": "Python",
"bytes": "16410"
}
],
"symlink_target": ""
}
|
"""JSON parser/serializer for Python.
The main implementation of this module is accessed through calls to
``read()`` and ``write()``. See their documentation for details.
"""
__author__ = "John Millikin <jmillikin@gmail.com>"
__version__ = (1, 6, 1)
__license__ = "GPL"
__all__ = [
'loads', 'dumps', 'read', 'write',
'ReadError', 'WriteError', 'UnknownSerializerError',
]
import codecs
from decimal import Decimal
import logging
import re
import sys
from UserString import UserString
# Constants {{{
KEYWORDS = (('null', None), ('true', True), ('false', False))
try:
INFINITY = float('inf')
except ValueError:
INFINITY = 1e300000
try:
NAN = float('nan')
except ValueError:
NAN = INFINITY / INFINITY
UNICODE_BOMS = [
(codecs.BOM_UTF32_BE, 'utf-32-be'),
(codecs.BOM_UTF32_LE, 'utf-32-le'),
(codecs.BOM_UTF16_BE, 'utf-16-be'),
(codecs.BOM_UTF16_LE, 'utf-16-le'),
(codecs.BOM_UTF8, 'utf-8'),
]
UTF_HEADERS = [
((0, 0, 0, 1), 'utf-32-be'),
((1, 0, 0, 0), 'utf-32-le'),
((0, 1, 0, 1), 'utf-16-be'),
((1, 0, 1, 0), 'utf-16-le'),
]
NUMBER_SPLITTER = re.compile(
'^(?P<minus>-)?(?P<int>0|[1-9][0-9]*)' # Basic integer portion
'(?:\\.(?P<frac>[0-9]+))?' # Fractional portion
'(?P<exp>[eE][-+]?[0-9]+)?$', # Exponent
)
READ_ESCAPES = {
'\\': '\\',
'"': '"',
'/': '/',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
}
WRITE_ESCAPES = {
# Escaping the solidus is a security measure intended for
# protecting users from broken browser parsing, if the consumer
# is stupid enough to parse JSON by including it directly into
# a <script> tag.
#
# See: http://t3.dotgnu.info/blog/insecurity/quotes-dont-help.html
#'/': '\\/', - but WE don't do that (md)
'"': '\\"',
'\t': '\\t',
'\b': '\\b',
'\n': '\\n',
'\r': '\\r',
'\f': '\\f',
'\\': '\\\\'
}
for __char_ord in range(0, 0x20):
WRITE_ESCAPES.setdefault(unichr(__char_ord), '\\u%04x' % __char_ord)
ALLOWED_WHITESPACE = u'\u0020\u0009\u000A\u000D'
# }}}
# Exception classes {{{
class ReadError(ValueError):
"""Exception raised if there is an error parsing a JSON expression."""
pass
class WriteError(ValueError):
"""Exception raised if there is an error generating a JSON expression."""
pass
class UnknownSerializerError(WriteError):
"""Exception raised if there is no known way to convert a
value to a JSON expression.
"""
pass
# }}}
# Parser {{{
def unicode_autodetect_encoding(bytestring):
"""Intelligently convert a byte string to Unicode.
Assumes the encoding used is one of the UTF-* variants. If the
input is already in unicode, this is a noop.
"""
if isinstance(bytestring, unicode):
return bytestring
# Check for UTF byte order marks in the bytestring
for bom, encoding in UNICODE_BOMS:
if bytestring.startswith(bom):
return bytestring[len(bom):].decode(encoding)
# Autodetect UTF-* encodings using the algorithm in the RFC
# Don't use inline if..else for Python 2.4
header = tuple((ord(b) and 1) or 0 for b in bytestring[:4])
for utf_header, encoding in UTF_HEADERS:
if header == utf_header:
return bytestring.decode(encoding)
# Default to UTF-8
try:
return bytestring.decode('utf-8')
except UnicodeDecodeError:
        logging.info('Encoding problem: %r', bytestring)
raise
class ParseErrorHelper(object):
"""Small class to provide a collection of error-formatting routines
shared between the Python and C implementation.
"""
def next_char_ord(self, text, offset):
value = ord(text[offset])
if (0xD800 <= value <= 0xDBFF) and len(text) >= 2:
upper = value
lower = ord(text[offset + 1])
upper -= 0xD800
lower -= 0xDC00
value = ((upper << 10) + lower) + 0x10000
if value > 0xffff:
return "U+%08X" % value
return "U+%04X" % value
def generic(self, text, offset, message):
line = text.count('\n', 0, offset) + 1
if line == 1:
column = offset + 1
else:
column = offset - text.rindex('\n', 0, offset)
template = ("JSON parsing error at line %d, column %d"
" (position %d): %s")
error = template % (line, column, offset, message)
raise ReadError(error)
def unexpected(self, text, offset, looking_for):
char_ord = self.next_char_ord(text, offset)
if looking_for is None:
desc = "Unexpected %s." % (char_ord,)
else:
desc = "Unexpected %s while looking for %s." % (char_ord, looking_for)
self.generic(text, offset, desc)
def extra_data(self, text, offset):
self.generic(text, offset, "Extra data after JSON expression.")
def no_expression(self, text, offset):
self.generic(text, offset, "No expression found.")
def unknown_escape(self, text, offset, escape):
self.generic(text, offset, "Unknown escape code: \\%s." % escape)
def unterminated_unicode(self, text, offset):
self.generic(text, offset, "Unterminated unicode escape.")
def unterminated_string(self, text, offset):
self.generic(text, offset, "Unterminated string.")
def reserved_code_point(self, text, offset, ord):
self.generic(text, offset, "U+%04X is a reserved code point." % ord)
def missing_surrogate(self, text, offset):
self.generic(text, offset, "Missing surrogate pair half.")
def invalid_number(self, text, offset):
self.generic(text, offset, "Invalid number.")
class Parser(object):
def __init__(self, text, use_float, error_helper):
self.text = text
self.index = 0
self.use_float = use_float
self.raise_ = error_helper
def parse(self):
value = self.parse_raw(True)
self.skip_whitespace()
if self.index != len(self.text):
self.raise_.extra_data(self.text, self.index)
return value
def parse_raw(self, root=False):
self.skip_whitespace()
if self.index == len(self.text):
self.raise_.no_expression(self.text, 0)
c = self.text[self.index]
if c == '{':
return self.read_object()
if c == '[':
return self.read_array()
if root:
self.raise_unexpected()
if c == '"':
return self.read_string()
if c in 'tfn':
return self.read_keyword()
if c in '-0123456789':
return self.read_number()
self.raise_unexpected()
def read_object(self):
retval = {}
start = self.index
skip = lambda: self.skip_whitespace(start, "Unterminated object.")
c = lambda: self.text[self.index]
self.skip('{', "object start")
skip()
if c() == '}':
self.skip('}', "object end")
return retval
while True:
skip()
if c() != '"':
self.raise_unexpected("property name")
key = self.parse_raw()
skip()
self.skip(':', "colon")
skip()
value = self.parse_raw()
retval[key] = value
skip()
if c() == '}':
self.skip('}', "object end")
return retval
self.skip(',', "comma")
def read_array(self):
retval = []
start = self.index
skip = lambda: self.skip_whitespace(start, "Unterminated array.")
c = lambda: self.text[self.index]
self.skip('[', "array start")
skip()
if c() == ']':
self.skip(']', "array end")
return retval
while True:
skip()
value = self.parse_raw()
retval.append(value)
skip()
if c() == ']':
self.skip(']', "array end")
return retval
self.skip(',', "comma")
def read_string(self):
text_len = len(self.text)
start = self.index
escaped = False
chunks = []
self.skip('"', "string start")
while True:
while not escaped:
if self.index >= text_len:
self.raise_.unterminated_string(self.text, start)
c = self.text[self.index]
if c == '\\':
escaped = True
elif c == '"':
self.skip('"', "string end")
return u''.join(chunks)
elif ord(c) < 0x20:
self.raise_unexpected()
else:
chunks.append(c)
self.index += 1
escaped = False
if self.index >= text_len:
self.raise_.unterminated_string(self.text, start)
c = self.text[self.index]
if c == 'u':
unescaped = self.read_unicode_escape()
chunks.append(unescaped)
elif c in READ_ESCAPES:
chunks.append(READ_ESCAPES[c])
else:
self.raise_.unknown_escape(self.text, self.index - 1, c)
self.index += 1
def read_unicode_escape(self):
"""Read a JSON-style Unicode escape.
Unicode escapes may take one of two forms:
* \\uUUUU, where UUUU is a series of four hexadecimal digits that
indicate a code point in the Basic Multi-lingual Plane.
* \\uUUUU\\uUUUU, where the two points encode a UTF-16 surrogate pair.
In builds of Python without wide character support, these are
returned as a surrogate pair.
"""
first_hex_str = self.text[self.index + 1:self.index + 5]
if len(first_hex_str) < 4 or '"' in first_hex_str:
self.raise_.unterminated_unicode(self.text, self.index - 1)
first_hex = int(first_hex_str, 16)
# Some code points are reserved for indicating surrogate pairs
if 0xDC00 <= first_hex <= 0xDFFF:
self.raise_.reserved_code_point(self.text, self.index - 1, first_hex)
# Check if it's a UTF-16 surrogate pair
if not(0xD800 <= first_hex <= 0xDBFF):
self.index += 4
return unichr(first_hex)
second_hex_str = self.text[self.index + 5:self.index + 11]
if(not(len(second_hex_str) >= 6
and second_hex_str.startswith('\\u'))
or '"' in second_hex_str):
self.raise_.missing_surrogate(self.text, self.index + 5)
second_hex = int(second_hex_str[2:], 16)
if sys.maxunicode <= 65535:
retval = unichr(first_hex) + unichr(second_hex)
else:
# Convert to 10-bit halves of the 20-bit character
first_hex -= 0xD800
second_hex -= 0xDC00
# Merge into 20-bit character
retval = unichr((first_hex << 10) + second_hex + 0x10000)
self.index += 10
return retval
def read_keyword(self):
for text, value in KEYWORDS:
end = self.index + len(text)
if self.text[self.index:end] == text:
self.index = end
return value
self.raise_unexpected()
def read_number(self):
allowed = '0123456789-+.eE'
end = self.index
try:
while self.text[end] in allowed:
end += 1
except IndexError:
pass
match = NUMBER_SPLITTER.match(self.text[self.index:end])
if not match:
self.raise_.invalid_number(self.text, self.index)
self.index = end
int_part = int(match.group('int'), 10)
if match.group('frac') or match.group('exp'):
if self.use_float:
return float(match.group(0))
return Decimal(match.group(0))
if match.group('minus'):
return -int_part
return int_part
def skip(self, text, error):
new_index = self.index + len(text)
skipped = self.text[self.index:new_index]
if skipped != text:
self.raise_unexpected(error)
self.index = new_index
def skip_whitespace(self, start=None, err=None):
text_len = len(self.text)
ws = '\x09\x20\x0a\x0d'
while self.index < text_len and self.text[self.index] in ws:
self.index += 1
if self.index >= text_len and (start is not None) and (err is not None):
self.raise_.generic(self.text, start, err)
def raise_unexpected(self, message=None):
self.raise_.unexpected(self.text, self.index, message)
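# Worked check (illustrative, not part of the original module) of the
# surrogate-pair arithmetic in read_unicode_escape: the JSON escape
# \ud834\udd1e decodes to code point U+1D11E.
def _example_surrogate_math():
    first_hex, second_hex = 0xD834, 0xDD1E
    merged = ((first_hex - 0xD800) << 10) + (second_hex - 0xDC00) + 0x10000
    assert merged == 0x1D11E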
def read_impl(text, use_float, error_helper, decimal):
parser = Parser(text, use_float, error_helper)
return parser.parse()
def read(bytestring, use_float=False):
"""Parse a JSON expression into a Python value.
If string is a byte string, it will be converted to Unicode
before parsing (see unicode_autodetect_encoding).
"""
text = unicode_autodetect_encoding(bytestring)
return read_impl(text, use_float, ParseErrorHelper(), Decimal)
loads = read
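# Usage sketch (illustrative, assuming the KEYWORDS table maps true/false/null
# as usual): fractional numbers parse to Decimal by default; pass
# use_float=True to get plain floats instead.
def _example_read():
    assert read('{"a": [true, null]}') == {u'a': [True, None]}
    assert read('[1.5]') == [Decimal('1.5')]
    assert read('[1.5]', use_float=True) == [1.5]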
# }}}
# Serializer {{{
ATOMIC_TYPES = (
type(None), bool, int, long, float, complex, Decimal, unicode, str)
def is_iterable(x):
try:
iter(x)
return True
except TypeError:
return False
class SerializerErrorHelper(object):
def invalid_root(self):
raise WriteError("The outermost container must be an array or object.")
def unknown_serializer(self, value):
raise UnknownSerializerError("No known serializer for object: %r" % (value,))
def self_referential(self):
raise WriteError("Cannot serialize self-referential values.")
def invalid_object_key(self):
raise WriteError("Only strings may be used as object keys.")
def incomplete_surrogate(self):
raise WriteError("Cannot serialize incomplete surrogate pair.")
def invalid_surrogate(self):
raise WriteError("Cannot serialize invalid surrogate pair.")
def reserved_code_point(self, ord):
raise WriteError("Cannot serialize reserved code point U+%04X." % ord)
def no_nan(self):
raise WriteError("Cannot serialize NaN.")
def no_infinity(self):
raise WriteError("Cannot serialize Infinity.")
def no_neg_infinity(self):
raise WriteError("Cannot serialize -Infinity.")
def no_imaginary(self):
raise WriteError("Cannot serialize complex numbers with"
" imaginary components.")
class Serializer(object):
def __init__(self, sort_keys, indent, ascii_only,
coerce_keys, encoding, on_unknown,
error_helper):
self.sort_keys = sort_keys
self.indent = indent
self.ascii_only = ascii_only
self.coerce_keys = coerce_keys
self.encoding = encoding
self.on_unknown = on_unknown
self.raise_ = error_helper
def append(self, value):
raise NotImplementedError
def serialize(self, value):
raise NotImplementedError
def serialize_object(self, value, parent_ids, in_unknown_hook=False):
if isinstance(value, UserString):
value = value.data
if isinstance(value, ATOMIC_TYPES):
if not parent_ids:
self.raise_.invalid_root()
self.serialize_atom(value)
elif hasattr(value, 'items'):
self.serialize_mapping(value, parent_ids)
elif is_iterable(value):
self.serialize_iterable(value, parent_ids)
elif not in_unknown_hook:
new_value = self.on_unknown(value,
self.raise_.unknown_serializer)
self.serialize_object(new_value, parent_ids, True)
else:
self.raise_.unknown_serializer(value)
def get_separators(self, indent_level):
if self.indent is None:
return '', ''
else:
indent = '\n' + (self.indent * (indent_level + 1))
post_indent = '\n' + (self.indent * indent_level)
return indent, post_indent
def serialize_mapping(self, value, parent_ids):
v_id = id(value)
if v_id in parent_ids:
self.raise_.self_referential()
a = self.append
first = True
items = value.items()
if self.sort_keys:
items = sorted(items)
indent, post_indent = self.get_separators(len(parent_ids))
a('{')
for key, item in items:
if isinstance(key, UserString):
key = key.data
if not isinstance(key, (str, unicode)):
if self.coerce_keys:
key = unicode(key)
else:
self.raise_.invalid_object_key()
if first:
first = False
else:
a(',')
a(indent)
self.serialize_object(key, parent_ids + [v_id])
if self.indent is None:
a(':')
else:
a(': ')
self.serialize_object(item, parent_ids + [v_id])
a(post_indent)
a('}')
def serialize_iterable(self, value, parent_ids):
v_id = id(value)
if v_id in parent_ids:
self.raise_.self_referential()
a = self.append
indent, post_indent = self.get_separators(len(parent_ids))
a('[')
first = True
for item in value:
if first:
first = False
else:
a(',')
a(indent)
self.serialize_object(item, parent_ids + [v_id])
a(post_indent)
a(']')
def serialize_atom(self, value):
for keyword, kw_value in KEYWORDS:
if value is kw_value:
return self.append(keyword)
if isinstance(value, unicode):
self.serialize_unicode(value)
elif isinstance(value, str):
self.serialize_bytes(value)
elif isinstance(value, (int, long)):
self.append(unicode(value))
elif isinstance(value, float):
self.serialize_float(value)
elif isinstance(value, complex):
self.serialize_complex(value)
elif isinstance(value, Decimal):
self.serialize_decimal(value)
else:
self.raise_.unknown_serializer(value)
def serialize_bytes(self, value):
self.serialize_unicode(unicode(value, 'ascii'))
def serialize_unicode(self, value):
a = self.append
stream = iter(value)
a('"')
for char in stream:
ochar = ord(char)
if char in WRITE_ESCAPES:
a(WRITE_ESCAPES[char])
elif ochar > 0x7E:
# Prevent invalid surrogate pairs from being
# encoded.
if 0xD800 <= ochar <= 0xDBFF:
try:
nextc = stream.next()
except StopIteration:
self.raise_.incomplete_surrogate()
onext = ord(nextc)
if not(0xDC00 <= onext <= 0xDFFF):
self.raise_.invalid_surrogate()
if self.ascii_only:
a('\\u%04x\\u%04x' % (ochar, onext))
else:
a(char)
a(nextc)
elif 0xDC00 <= ochar <= 0xDFFF:
self.raise_.reserved_code_point(ochar)
elif self.ascii_only:
if ochar > 0xFFFF:
unicode_value = ord(char)
reduced = unicode_value - 0x10000
second_half = (reduced & 0x3FF) # Lower 10 bits
first_half = (reduced >> 10)
first_half += 0xD800
second_half += 0xDC00
a('\\u%04x\\u%04x' % (first_half, second_half))
else:
a('\\u%04x' % ochar)
else:
a(char)
else:
a(char)
a('"')
def serialize_float(self, value):
if value != value:
self.raise_.no_nan()
if value == INFINITY:
self.raise_.no_infinity()
if value == -INFINITY:
self.raise_.no_neg_infinity()
self.append(repr(value))
def serialize_complex(self, value):
if value.imag == 0.0:
self.append(repr(value.real))
else:
self.raise_.no_imaginary()
def serialize_decimal(self, value):
if value != value:
self.raise_.no_nan()
s_value = unicode(value)
if s_value == u'Infinity':
self.raise_.no_infinity()
elif s_value == u'-Infinity':
self.raise_.no_neg_infinity()
self.append(s_value)
class StreamSerializer(Serializer):
def __init__(self, fp, *args, **kwargs):
super(StreamSerializer, self).__init__(*args, **kwargs)
self.fp = fp
def append(self, value):
if isinstance(value, str):
value = unicode(value, 'ascii')
if self.encoding is not None:
value = value.encode(self.encoding)
self.fp.write(value)
def serialize(self, value):
self.serialize_object(value, [])
class BufferSerializer(Serializer):
def __init__(self, *args, **kwargs):
super(BufferSerializer, self).__init__(*args, **kwargs)
self.chunks = []
def append(self, value):
self.chunks.append(value)
def serialize(self, value):
self.serialize_object(value, [])
str_result = u''.join(self.chunks)
if self.encoding is None:
return str_result
return str_result.encode(self.encoding)
def dump_impl(value, fp, sort_keys, indent, ascii_only,
coerce_keys, encoding, on_unknown, error_helper,
decimal, userstring):
serializer = StreamSerializer(fp, sort_keys, indent, ascii_only,
coerce_keys, encoding,
on_unknown, error_helper)
return serializer.serialize(value)
def dump(value, fp, sort_keys=False, indent=None, ascii_only=True,
coerce_keys=False, encoding='utf-8', on_unknown=None):
"""Serialize a Python value to a JSON-formatted byte string.
Rather than being returned as a string, the output is written to
a file-like object.
"""
return dump_impl(value, fp, sort_keys,
validate_indent(indent), ascii_only,
coerce_keys, encoding,
validate_on_unknown(on_unknown),
SerializerErrorHelper(),
                     Decimal, UserString)
def write_impl(value, sort_keys, indent, ascii_only,
coerce_keys, encoding, on_unknown, error_helper,
decimal, userstring):
serializer = BufferSerializer(sort_keys, indent, ascii_only,
coerce_keys, encoding,
on_unknown, error_helper)
return serializer.serialize(value)
def write(value, sort_keys=True, indent=None, ascii_only=True,
coerce_keys=False, encoding='utf-8', on_unknown=None):
"""Serialize a Python value to a JSON-formatted byte string.
.. describe:: value
The Python object to serialize.
.. describe:: sort_keys
Whether object keys should be kept sorted. Useful
for tests, or other cases that check against a
constant string value.
.. describe:: indent
A string to be used for indenting arrays and objects.
If this is non-None, pretty-printing mode is activated.
.. describe:: ascii_only
Whether the output should consist of only ASCII
characters. If this is True, any non-ASCII code points
are escaped even if their inclusion would be legal.
.. describe:: coerce_keys
Whether to coerce invalid object keys to strings. If
this is False, an exception will be raised when an
invalid key is specified.
.. describe:: encoding
The output encoding to use. This must be the name of an
encoding supported by Python's codec mechanism. If
None, a Unicode string will be returned rather than an
encoded bytestring.
If a non-UTF encoding is specified, the resulting
bytestring might not be readable by many JSON libraries,
including jsonlib.
The default encoding is UTF-8.
.. describe:: on_unknown
A callable to be used for converting objects of an
unrecognized type into a JSON expression. If ``None``,
unrecognized objects will raise an ``UnknownSerializerError``.
"""
return write_impl(value, sort_keys, validate_indent(indent), ascii_only,
coerce_keys, encoding,
validate_on_unknown(on_unknown),
SerializerErrorHelper(),
                      Decimal, UserString)
dumps = write
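# Usage sketch (illustrative, exercising the pure-Python path above): with
# encoding=None a unicode string is returned, and sort_keys/indent control
# the layout.
def _example_write():
    assert write({'b': 1, 'a': 2}, encoding=None) == u'{"a":2,"b":1}'
    assert write([1, 2], indent='  ', encoding=None) == u'[\n  1,\n  2\n]'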
def validate_indent(indent):
if indent is not None:
indent = unicode(indent)
if not(indent is None or len(indent) == 0):
if len(indent.strip(ALLOWED_WHITESPACE)) > 0:
raise TypeError("Only whitespace may be used for indentation.")
return indent
def validate_on_unknown(f):
def on_unknown(value, unknown):
if f is not None:
return f(value)
unknown(value)
if not(f is None or callable(f)):
raise TypeError("The on_unknown object must be callable.")
return on_unknown
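# Sketch of an on_unknown hook (illustrative, not part of the original
# module): values of unrecognized types can be converted to something
# serializable before writing. datetime is used here purely as an example.
def _dates_to_strings(value):
    import datetime
    if isinstance(value, datetime.date):
        return value.isoformat()
    raise UnknownSerializerError("No serializer for %r" % (value,))
# write([datetime.date(2014, 1, 2)], on_unknown=_dates_to_strings,
#       encoding=None) would then produce u'["2014-01-02"]'.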
# }}}
try:
from _jsonlib import read_impl, write_impl, dump_impl
except ImportError:
pass
|
{
"content_hash": "6d0c28c204eaf16eeaa915e16e2ffe30",
"timestamp": "",
"source": "github",
"line_count": 829,
"max_line_length": 85,
"avg_line_length": 31.623642943305185,
"alnum_prop": 0.5480241074153189,
"repo_name": "hudora/huTools",
"id": "8e4cedc25879a06fdd9e97164ee3b5e0fda92a11",
"size": "26898",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "huTools/_jsonlib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "415741"
},
{
"name": "Tcl",
"bytes": "89428"
}
],
"symlink_target": ""
}
|
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Scrapy nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from importlib import import_module
from pkgutil import iter_modules
def load_object(path):
"""Load an object given its absolute object path, and return it.
    The object can be a class, function, variable or instance.
    Example path: 'scrapy.contrib.downloadermiddelware.redirect.RedirectMiddleware'
"""
try:
dot = path.rindex('.')
except ValueError:
raise ValueError("Error loading object '%s': not a full path" % path)
module, name = path[:dot], path[dot+1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImportError("Error loading object '%s': %s" % (path, e))
try:
obj = getattr(mod, name)
except AttributeError:
raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
return obj
### END of Scrapy code
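# Usage sketch (illustrative): load_object resolves a dotted path to the
# object it names; os.path.join serves here as a stdlib example.
def _example_load_object():
    from os import path
    assert load_object('os.path.join') is path.join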
|
{
"content_hash": "8d85baf635a794f084bd2f9d1b474927",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 92,
"avg_line_length": 40.411764705882355,
"alnum_prop": 0.7176128093158661,
"repo_name": "bcajes/pipeable",
"id": "c3f6080de884f874a61cd3976bb4e4f6d9f3995b",
"size": "2567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeable/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10784"
}
],
"symlink_target": ""
}
|
from setuptools import find_packages, setup
import os.path
import sys
import os
if os.environ.get('distutils_issue8876_workaround_enabled', False):
# sdist_hack: Remove reference to os.link to disable using hardlinks when
# building setup.py's sdist target. This is done because
# VirtualBox VMs shared filesystems don't support hardlinks.
del os.link
def read_project_version(*py):
py = os.path.join(*py)
__version__ = None
for line in open(py).read().splitlines():
if '__version__' in line:
exec(line)
break
return __version__
NAME = 'sphinxcontrib-eagle'
URL = 'https://github.com/ponty/sphinxcontrib-eagle'
DESCRIPTION = 'Sphinx extension to include image or partlist of eagle schematic or board'
VERSION = read_project_version('sphinxcontrib', 'eagle.py')
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
classifiers = [
# Get more strings from
# http://www.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Documentation',
'Topic :: Utilities',
]
install_requires = open("requirements.txt").read().split('\n')
# compatible with distutils of python 2.3+ or later
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=open('README.rst', 'r').read(),
classifiers=classifiers,
keywords='sphinx eagle',
author='ponty',
# author_email='',
url=URL,
license='BSD',
packages=find_packages(exclude=['bootstrap', 'pavement', ]),
include_package_data=True,
test_suite='nose.collector',
zip_safe=False,
install_requires=install_requires,
namespace_packages=['sphinxcontrib'],
**extra
)
|
{
"content_hash": "ee6112d476ecd4498c4f92803bdc5af4",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 89,
"avg_line_length": 29.47761194029851,
"alnum_prop": 0.6587341772151899,
"repo_name": "ponty/sphinxcontrib-eagle",
"id": "ed2947a5256f05f86a3a60f5b9d2304d86321e7f",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "10719"
}
],
"symlink_target": ""
}
|
"""
Script to print potentially missing source dependencies based on the actual
.h and .cc files in the source tree and which files are included in the gyp
and gn files. The latter inclusion is overapproximated.
TODO(machenbach): Gyp files in src will point to source files in src without a
src/ prefix. For simplicity, all paths relative to src are stripped. But this
tool won't be accurate for other sources in other directories (e.g. cctest).
"""
import itertools
import re
import os
V8_BASE = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
V8_SRC_BASE = os.path.join(V8_BASE, 'src')
V8_INCLUDE_BASE = os.path.join(V8_BASE, 'include')
GYP_FILES = [
os.path.join(V8_BASE, 'src', 'd8.gyp'),
os.path.join(V8_BASE, 'src', 'v8.gyp'),
os.path.join(V8_BASE, 'src', 'third_party', 'vtune', 'v8vtune.gyp'),
os.path.join(V8_BASE, 'test', 'cctest', 'cctest.gyp'),
os.path.join(V8_BASE, 'test', 'unittests', 'unittests.gyp'),
os.path.join(V8_BASE, 'tools', 'parser-shell.gyp'),
]
def path_no_prefix(path):
if path.startswith('../'):
return path_no_prefix(path[3:])
elif path.startswith('src/'):
return path_no_prefix(path[4:])
else:
return path
def isources(directory):
for root, dirs, files in os.walk(directory):
for f in files:
if not (f.endswith('.h') or f.endswith('.cc')):
continue
yield path_no_prefix(os.path.relpath(os.path.join(root, f), V8_BASE))
def iflatten(obj):
if isinstance(obj, dict):
for value in obj.values():
for i in iflatten(value):
yield i
elif isinstance(obj, list):
for value in obj:
for i in iflatten(value):
yield i
elif isinstance(obj, basestring):
    yield path_no_prefix(obj)
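# Illustrative check (not part of the original script): iflatten yields every
# string reachable inside nested dicts and lists, with path prefixes stripped
# by path_no_prefix.
def _example_iflatten():
  assert sorted(iflatten({'a': ['../x.cc', {'b': 'src/y.h'}]})) == ['x.cc', 'y.h']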
def iflatten_gyp_file(gyp_file):
"""Overaproximates all values in the gyp file.
Iterates over all string values recursively. Removes '../' path prefixes.
"""
with open(gyp_file) as f:
return iflatten(eval(f.read()))
def iflatten_gn_file(gn_file):
"""Overaproximates all values in the gn file.
Iterates over all double quoted strings.
"""
with open(gn_file) as f:
for line in f.read().splitlines():
match = re.match(r'.*"([^"]*)".*', line)
if match:
yield path_no_prefix(match.group(1))
def icheck_values(values, *source_dirs):
for source_file in itertools.chain(
*[isources(source_dir) for source_dir in source_dirs]
):
if source_file not in values:
yield source_file
gyp_values = set(itertools.chain(
*[iflatten_gyp_file(gyp_file) for gyp_file in GYP_FILES]
))
print "----------- Files not in gyp: ------------"
for i in sorted(icheck_values(gyp_values, V8_SRC_BASE, V8_INCLUDE_BASE)):
print i
gn_values = set(iflatten_gn_file(os.path.join(V8_BASE, 'BUILD.gn')))
print "\n----------- Files not in gn: -------------"
for i in sorted(icheck_values(gn_values, V8_SRC_BASE, V8_INCLUDE_BASE)):
print i
|
{
"content_hash": "af4ad7e54ae47d8e9fdd0f3e17be9671",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 78,
"avg_line_length": 28.782178217821784,
"alnum_prop": 0.6580667354661163,
"repo_name": "kingland/runtime",
"id": "56e3156550aa4c07bac6e87445a0d82f472366c5",
"size": "3094",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "deps/v8/tools/verify_source_deps.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "29659"
},
{
"name": "C",
"bytes": "853"
},
{
"name": "C++",
"bytes": "2140348"
},
{
"name": "JavaScript",
"bytes": "508976"
},
{
"name": "Python",
"bytes": "6268"
},
{
"name": "Shell",
"bytes": "3888"
}
],
"symlink_target": ""
}
|
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys
# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n\n' % ntrials)
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
def write_netcdf(filename,complevel,lsd):
file = netCDF4.Dataset(filename,'w',format='NETCDF4')
file.createDimension('n1', n1dim)
file.createDimension('n2', n2dim)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
'f8',('n1','n2','n3','n4'),\
zlib=True,shuffle=True,complevel=complevel,\
least_significant_digit=lsd)
foo[:] = array
file.close()
def read_netcdf(filename):
file = netCDF4.Dataset(filename)
data = file.variables['data'][:]
file.close()
lsd = None
sys.stdout.write('using least_significant_digit %s\n\n' % lsd)
for complevel in range(0,10,2):
sys.stdout.write('testing compression with complevel %s...\n' % complevel)
# writing.
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# print out size of resulting files.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
complevel = 4
sys.stdout.write('\nusing complevel %s\n\n' % complevel)
for lsd in range(1,6):
sys.stdout.write('testing compression with least_significant_digit %s...\n' % lsd)
# writing.
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# print out size of resulting files.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
|
{
"content_hash": "95e2560944e6b7f4f9b7205d85c50cb5",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 106,
"avg_line_length": 39.73015873015873,
"alnum_prop": 0.6360367558929285,
"repo_name": "Unidata/netcdf4-python",
"id": "03919985ec893206648a1aec9617dbea08a9b2d0",
"size": "2604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/bench_compress2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "425"
},
{
"name": "Cython",
"bytes": "334106"
},
{
"name": "Python",
"bytes": "296829"
},
{
"name": "Shell",
"bytes": "469"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='ringmq',
version='0.1',
url='http://github.com/mar29th/ring',
author='Douban Inc.',
author_email='platform@douban.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: System :: Networking',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: Apache Software License',
],
license="http://www.apache.org/licenses/LICENSE-2.0",
packages=['ring', 'ring.tests', 'ring.benchmark']
)
|
{
"content_hash": "2c1b1ed3b5c91c733c1dd9f8956e1715",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 61,
"avg_line_length": 30.761904761904763,
"alnum_prop": 0.5975232198142415,
"repo_name": "mar29th/ring",
"id": "ed624e5c78356425b76cdc5841fa6074f5939fa0",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "127640"
}
],
"symlink_target": ""
}
|
"""
=====================================
:mod:`topologies` -- Swarm topologies
=====================================
This module defines various topologies for swarm intelligence algorithms.
Particle swarms make use of topologies, which determine the logical
relationships among particles in the swarm (i.e., which ones belong to the same
"neighborhood"). All topology functions have the following arguments:
- *random* -- the random number generator object
- *population* -- the population of Particles
- *args* -- a dictionary of keyword arguments
Each topology function returns a list of lists of neighbors
for each particle in the population. For example, if a swarm
contained 10 particles, then this function would return a list
containing 10 lists, each of which contained the neighbors for
its corresponding particle in the population.
Rather than constructing and returning a list of lists directly, the
topology functions could (and probably *should*, for efficiency) be
written as generators that yield each neighborhood list one at a
time. This is how the existing topology functions operate.
.. Copyright 2012 Inspired Intelligence Initiative
.. This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.. You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
.. module:: topologies
.. moduleauthor:: Aaron Garrett <aaron.lee.garrett@gmail.com>
"""
def star_topology(random, population, args):
"""Returns the neighbors using a star topology.
This function sets all particles as neighbors for all other particles.
This is known as a star topology. The resulting list of lists of
neighbors is returned.
.. Arguments:
random -- the random number generator object
population -- the population of particles
args -- a dictionary of keyword arguments
"""
for _ in range(len(population)):
yield population[:]
def ring_topology(random, population, args):
"""Returns the neighbors using a ring topology.
This function sets all particles in a specified sized neighborhood
as neighbors for a given particle. This is known as a ring
topology. The resulting list of lists of neighbors is returned.
.. Arguments:
random -- the random number generator object
population -- the population of particles
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *neighborhood_size* -- the width of the neighborhood around a
particle which determines the size of the neighborhood
(default 3)
"""
neighborhood_size = args.setdefault('neighborhood_size', 3)
half_hood = neighborhood_size // 2
neighbor_index_start = []
for index in range(len(population)):
if index < half_hood:
neighbor_index_start.append(len(population) - half_hood + index)
else:
neighbor_index_start.append(index - half_hood)
for start in neighbor_index_start:
n = []
for i in range(0, neighborhood_size):
n.append(population[(start + i) % len(population)])
yield n
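# Usage sketch (illustrative, not part of the original module): plain ints
# stand in for Particle objects. With the default neighborhood_size of 3,
# each particle's neighborhood is itself plus its two ring neighbors.
def _example_ring_topology():
    import random
    population = list(range(5))
    hoods = list(ring_topology(random.Random(), population, {}))
    assert hoods[0] == [4, 0, 1]
    assert hoods[2] == [1, 2, 3]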
|
{
"content_hash": "2ff805fea7f5db6fc3e9ddb5f1ae2e1f",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 83,
"avg_line_length": 39.618556701030926,
"alnum_prop": 0.6695290137913089,
"repo_name": "saulshanabrook/pushgp.py",
"id": "c166b6d8fe84e2013b164f9e34bb1e30cc41b2f6",
"size": "3843",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "inspyred/swarm/topologies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "277825"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
setup(
name='ph_py',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.1',
description='Product Hunt Beta Python Library',
long_description='Client library for the Product Hunt beta API. Complete with OAuth and read/write capabilities (to use write functionality, your app needs access)',
# The project's main homepage.
url='https://github.com/anatg/ph_py',
# Author details
author='Anat Gilboa and Jasdev Singh',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords=['api', 'product', 'hunt', 'product hunt'],
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['requests', 'simplejson'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={},
)
|
{
"content_hash": "79b6bd76f02534834d216be22bcc28bc",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 169,
"avg_line_length": 40.392405063291136,
"alnum_prop": 0.6806643685365089,
"repo_name": "anatg/ph_py",
"id": "f28841053fb7cdef753faff30e89349733902e72",
"size": "3191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22922"
}
],
"symlink_target": ""
}
|
"""
LAS
===
This file defines the classes and functions necessary to read and write LAS
Version 2.0 [1]_ files.
References
----------
.. [1] LAS Version 2.0:
http://www.cwls.org/wp-content/uploads/2014/09/LAS_20_Update_Jan2014.pdf
"""
import numpy as np
from collections import OrderedDict
try:
import builtins
except ImportError:
import __builtin__ as builtins
_VERBOSE = False
class LASFile(object):
"""
A LAS 2.0 file object.
Parameters
----------
filename : str
File name
Attributes
----------
filename : str
File name.
header : OrderedDict
Header of the file. Each section of the LAS header is a value in the
dictionary. The key of each section is its capitalized first letter
(e.g. "V" for VERSION section). Each section is also an OrderedDict, in
which the keys are the mnems of the lines and the lines themselves are
the values. The lines are dicts with 4 keys: "MNEM", "UNIT", "DATA" and
"DESC".
headersectionnames : dict
Names of the header sections. The keys are the capitalized first
letters of each section and the values are the full section names.
headerlayout : dict
Layout of the header. Similar to `header`, but instead of the lines,
contains the layout of the line. The layout of a line is a list of 6
ints, representing the number of whitespaces in the line. The elements
of the list are, respectively, the number of whitespaces between the
beginning of the line and the beginning of the mnem, the end of mnem
and the first dot, the end of the unit and the beginning of the data,
the end of the data and the colon, the colon and the beginning of the
description and the end of the description and the end of the line.
headercomments : dict
Comments in the LAS header. The keys are the linenumber of the comment
and the values are the comments themselves.
data : numpy.ndarray
The data present in the ASCII section of the LAS file. The data shape
is nc x ns, where nc is the number of curves and ns is the
number of samples.
"""
def __init__(self, filename):
self.filename = filename
self.header = OrderedDict()
self.headersectionnames = {}
self.headerlayout = {}
self.headercomments = {}
self.data = np.empty(0)
class LASReader(LASFile):
"""
A specialization of `LASFile` for reading files.
Attributes
----------
wellname : str
Name of the well.
curvesnames : list
Name of each curve.
curvesunits : list
Unit of each curve.
Notes
-----
When creating a `LASReader` object the file is not read immediately. The
`read` method must be called after the creation. Once it is called, all
of its attributes will be read from the file and the file will be closed.
Examples
--------
>>> lasfile = LASReader("filename.las")
>>> lasfile.read()
>>> lasfile.wellname
'wellname'
>>> lasfile.curvesnames
['curve01name', 'curve02name', ...]
>>> lasfile.header["V"]["VERSION"]["DATA"]
'2.0'
>>> lasfile.data[0]
array([1500.0, 1500.2, ...])
"""
def __init__(self, filename):
super(LASReader, self).__init__(filename)
@property
def wellname(self):
return self.header["W"]["WELL"]["DATA"]
@property
def curvesnames(self):
return [line['MNEM'] for line in iter(self.header["C"].values())]
@property
def curvesunits(self):
return [line['UNIT'] for line in iter(self.header["C"].values())]
@staticmethod
def _splitline(line):
"""
Split a LAS line in MNEM, UNITS, DATA and DESCRIPTION.
Parameters
----------
line : str
A non-comment LAS line.
Returns
-------
mnem : str
Mnemoic part of the line
unit : str
Unit part of the line
data : str
Data part of the line
desc : str
Description part of the line
Notes
-----
This method doesn't remove whitespaces from the line parts.
Examples
--------
>>> LASReader._splitline(' DEPTH.M : MEASURED DEPTH ')
(' DEPTH', 'M', ' ', ' MEASURED DEPTH ')
"""
# if ":" not in line:
# desc = ''
# else:
# line, desc = line.rsplit(":", 1)
# desc = desc.strip()
# line = line.strip()
# if " " not in line:
# data = ''
# else:
# line, data = line.rsplit(" ", 1)
# data = data.strip()
# line = line.strip()
# if "." not in line:
# unit = ''
# mnem = line
# else:
# mnem, unit = line.split(".", 1)
# return mnem, unit, data, desc
rest, desc = line.rsplit(":", 1)
mnem, rest = rest.split(".", 1)
unit, data = rest.split(" ", 1)
return mnem, unit, data, desc
@staticmethod
def _getlinelayout(splittedline):
"""
Obtain the layout, i.e. the whitespace structure, of a splitted line.
Parameters
----------
splittedline : list
Contains the four parts of a LAS line with the whitespaces, i.e.
the return of the `_splitline` method
Returns
-------
layout : list
            A list of 6 ints, in which each element corresponds to the length of
a string of whitespaces in the line. The elements of the list are,
respectively, the number of whitespaces between the beginning of
the line and the beginning of the mnem, the end of mnem and the
first dot, the end of the unit and the beginning of the data, the
end of the data and the colon, the colon and the beginning of the
description and the end of the description and the end of the line.
Examples
--------
>>> splittedline = LASReader._splitline(
' DEPTH.M : MEASURED DEPTH ')
>>> LASReader._getlinelayout(splittedline)
[2, 0, 7, 0, 1, 2]
"""
mnem, unit, data, desc = splittedline
layout = []
lmnem = mnem.lstrip()
ldata = data.lstrip()
ldesc = desc.lstrip()
layout.append(len(mnem) - len(lmnem))
layout.append(len(lmnem) - len(lmnem.rstrip()))
layout.append(len(data) - len(ldata))
layout.append(len(ldata) - len(ldata.rstrip()))
layout.append(len(desc) - len(ldesc))
layout.append(len(ldesc) - len(ldesc.rstrip()))
return layout
@staticmethod
def _parseline(line, withlayout=False):
"""
Parse a LAS line in its components and, if specified, its layout.
Parameters
----------
line : str
A non-comment LAS line.
withlayout : bool, optional
Whether the layout must be returned.
Returns
-------
parsedline : dict
A dictionary consisting of the 4 elements of a LAS line, with keys
"MENM", "UNIT", "DATA" and "DESC".
layout : list
            A list of 6 ints, in which each element corresponds to the length
a string of whitespaces in the line.
Examples
--------
>>> parsedline, layout = LASReader._parseline(
' DEPTH.M : MEASURED DEPTH ', True)
>>> parsedline
{'DATA': '', 'DESC': 'MEASURED DEPTH', 'MNEM': 'DEPTH', 'UNIT': 'M'}
>>> layout
[2, 0, 7, 0, 1, 2]
"""
mnem, unit, data, desc = LASReader._splitline(line)
parsedline = {}
parsedline["MNEM"] = mnem.strip()
parsedline["UNIT"] = unit.strip()
parsedline["DATA"] = data.strip()
parsedline["DESC"] = desc.strip()
if not withlayout:
return parsedline
else:
layout = LASReader._getlinelayout((mnem, unit, data, desc))
return parsedline, layout
@staticmethod
def _getheaderlines(fileobject):
"""
Obtain the LAS header lines from a file object.
Parameters
----------
fileobject : file-like object
The file object from which the header lines will be obtained.
Returns
-------
headerlines : list
A list containing the lines that belong to a LAS file header.
"""
fileobject.seek(0)
headerlines = []
line = fileobject.readline()
while not line.lstrip().startswith('~A'):
            headerlines.append(line.replace('\t', ' '))  # TODO: support other kinds of separators
line = fileobject.readline()
headerlines.append(line)
return headerlines
@staticmethod
def _getheader(headerlines, withsectionnames=False, withlayout=False, withcomments=False):
"""
Obtain the LAS header from a list of lines.
Parameters
----------
headerlines : list
A list containing the lines that belong to a LAS file header, i.e.
the return of `_getheaderlines` method.
withsectionnames : bool, optional
Whether to return the LAS section names.
withlayout : bool, optional
Whether to return the LAS header layout.
withcomments : bool, optional
Whether to return the LAS header comments.
Returns
-------
header : OrderedDict
Header of a LAS file. Each section of the header is a value in the
dictionary. The key of each section is its capitalized first letter
(e.g. "V" for VERSION section). Each section is also an
OrderedDict, in which the keys are the mnems of the lines and the
lines themselves are the values. The lines are dicts with 4 keys:
"MNEM", "UNIT", "DATA" and "DESC".
sectionnames : dict
Names of the header sections. The keys are the capitalized first
letters of each section and the values are the full section names.
layout : dict
Layout of the header. Similar to `header`, but instead of the
lines, contains the layout of the line.
comments : dict
Comments in the LAS header. The keys are the linenumber of the
comment and the values are the comments themselves.
See Also
--------
_getlinelayout : Obtain the line layout.
"""
global _VERBOSE
header = OrderedDict()
sectionnames = {}
comments = {}
layout = {}
currentsection = None
linecount = 0
for line in headerlines:
if not line:
continue
elif line.lstrip().startswith('#'):
comments[linecount] = line.split('\n')[0]
elif line.lstrip().startswith('~'):
currentsection = []
sectionname = line.split('\n')[0]
sectionkey = sectionname.split('~')[1][0].upper()
header[sectionkey] = currentsection
sectionnames[sectionkey] = sectionname
else:
currentsection.append(line.split('\n')[0])
linecount += 1
for sectionkey, lines in header.items():
try:
section = OrderedDict()
sectionlayout = {}
for line in lines:
parsedline, linelayout = LASReader._parseline(line, True)
# if parsedline['MNEM'] in section:
# print "Curva repetida:", parsedline['MNEM'] # TODO: Fazer algo
# section[parsedline['MNEM']] = parsedline
# sectionlayout[parsedline['MNEM']] = linelayout
                    # TODO: improve this and verify that it works
old_mnem = parsedline['MNEM']
new_mnem = old_mnem
count = 0
while new_mnem in section:
count += 1
new_mnem = old_mnem + '_{:0>4}'.format(count)
if _VERBOSE and count:
print("Nome de curva repetido:", old_mnem)
print("Substituindo por:", new_mnem)
parsedline['MNEM'] = new_mnem
section[new_mnem] = parsedline
sectionlayout[new_mnem] = linelayout
if not section:
header[sectionkey] = ''
else:
header[sectionkey] = section
layout[sectionkey] = sectionlayout
            except Exception:
header[sectionkey] = '\n'.join(lines)
if (not withsectionnames) and (not withlayout) and (not withcomments):
return header
else:
returns = (header,)
if withsectionnames:
returns += (sectionnames,)
if withlayout:
returns += (layout,)
if withcomments:
returns += (comments,)
return returns
@staticmethod
def _getdatalines(fileobject):
"""
Obtain the LAS ASCII section lines from a file object.
Parameters
----------
fileobject : file-like object
The file object from which the data lines will be obtained.
Returns
-------
datalines : list
A list containing the lines that belong to a LAS file ASCII
section.
"""
fileobject.seek(0)
line = fileobject.readline()
while not line.lstrip().startswith('~A'):
line = fileobject.readline()
datalines = fileobject.readlines()
return datalines
@staticmethod
def _getflatdata(datalines):
"""
Obtain a flat `numpy.ndarray` from a list of data lines.
Concatenate the lines; split the resulting string, convert each element
to float and convert to a `numpy.ndarray`.
Parameters
----------
datalines : list
A list containing the lines that belong to a LAS file ASCII
section.
Returns
-------
flatdata : numpy.ndarray
A flat (i.e. one-dimensional) array containing data from
`datalines`.
"""
flatdata = np.asarray([float(a) for a in ' '.join(datalines).split()])
return flatdata
@staticmethod
def _reshapeflatdata(flatdata, ncurves):
"""
Reshape the flat data into a 2-dimensional data.
The reshaped data will have the same number of elements as `flatdata`
and first dimension with length `ncurves`. This way, `data[0]` will
be the data from the first curve in the file.
Parameters
----------
flatdata : numpy.ndarray
A flat (i.e. one-dimensional) array containing data from a LAS
file.
ncurves : int
Number of existing curves in the same file
Returns
-------
data : numpy.ndarray
            Reshaped data with first dimension length equal to `ncurves`
"""
data = np.reshape(flatdata, (-1, ncurves)).T
return data
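    @staticmethod
    def _example_reshape():
        # Illustrative sketch (not part of the original class): two curves
        # sampled three times each; after reshaping, data[0] is the first
        # curve in the file.
        flat = np.array([1.0, 10.0, 2.0, 20.0, 3.0, 30.0])
        data = LASReader._reshapeflatdata(flat, 2)
        assert data.shape == (2, 3)
        assert list(data[0]) == [1.0, 2.0, 3.0]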
@staticmethod
def _replacenullvalues(data, nullvalue, copy=False):
"""
Replace null values in an array with `np.nan`.
Parameters
----------
data : np.ndarray
Array containing null values to be replaced.
nullvalue : float
The value that will be replaced by `np.nan`.
copy : bool, optional
Whether the operation will be performed in a copy of the data or
in the data itself.
Returns
-------
newdata : np.ndarray
A array with `nullvalue` replaced with `np.nan`.
"""
if copy:
newdata = np.copy(data)
else:
newdata = data
where = (newdata == nullvalue)
newdata[where] = np.nan
return newdata
@staticmethod
def _reorderdata(data, copy=False):
"""
Reorder the data so that the first line is in ascending order.
This method suposes that the first line of `data` is already sorted
in descending order. It will invert the order of the rows in the array,
i.e. the last row will become the first, the second last will become
the second and so on.
Parameters
----------
data : np.ndarray
The array that will be reordered.
copy : bool, optional
Whether the operation will be performed in a copy of the data or
in the data itself.
Returns
-------
newdata : np.ndarray
A array with the rows in reverse order.
"""
if copy:
newdata = np.copy(data)
else:
newdata = data
return newdata[:, ::-1]
def read(self):
"""
Read the file.
Notes
-----
When creating a `LASReader` object the file is not read immediately.
This method must be called after the creation. Once it is called, all
of the instance's attributes will be read from the file and the file
will be closed.
"""
fileobject = builtins.open(self.filename, 'r')
headerlines = LASReader._getheaderlines(fileobject)
datalines = LASReader._getdatalines(fileobject)
fileobject.close()
self.header, self.headersectionnames, self.headerlayout, self.headercomments = LASReader._getheader(headerlines,
True, True,
True)
ncurves = len(self.header["C"])
nullvalue = float(self.header["W"]["NULL"]["DATA"])
stepvalue = float(self.header["W"]["STEP"]["DATA"])
flattendata = LASReader._getflatdata(datalines)
nandata = LASReader._replacenullvalues(flattendata, nullvalue)
self.data = LASReader._reshapeflatdata(nandata, ncurves)
if (stepvalue == nullvalue) or (stepvalue == 0.0):
stepvalue = self.data[0][1] - self.data[0][0]
if stepvalue < 0:
self.data = LASReader._reorderdata(self.data)
class LASWriter(LASFile):
"""
A specialization of `LASFile` for writing files.
Notes
-----
    When creating a `LASWriter` object the file is not written immediately. The
`header` and `data` attributes must be defined before calling the `write`
method. The other attributes (`headersectionnames`, `headerlayout` and
`headercomments`) are optional.
No verification is done to guarantee that the header and data are
compatible (i.e. have the same number of curves and the same depth range).
There are two methods that can be used for this: `correctwellsection` and
`correctcurvesection`.
In order to get a better layout for the header, the method
`getprettyheaderlayout` may be used.
Examples
--------
>>> lasfile = LASWriter("filename.las")
>>> lasfile.header = existing_header
>>> lasfile.data = existing_data
>>> lasfile.write()
"""
DEFAULTMNEMSTYLE = {'leftmargin': 1, 'rightmargin': 0, 'allign': 'left'}
DEFAULTDATASTYLE = {'leftmargin': 1, 'rightmargin': 1, 'allign': 'left'}
    DEFAULTDESCSTYLE = {'leftmargin': 1, 'rightmargin': 0, 'allign': 'left'}
DEFAULTUNIFORMSECTIONS = True
MINIMALLINELAYOUT = [0, 0, 1, 0, 0, 0]
LASLINEPATTERN = "{0[0]}{MNEM}{0[1]}.{UNIT}{0[2]}{DATA}{0[3]}:{0[4]}{DESC}{0[5]}"
def __init__(self, filename):
super(LASWriter, self).__init__(filename)
@staticmethod
def _composeline(parsedline, linelayout=None):
"""
Turn a LAS parsed line into a one-string-line.
Parameters
----------
parsedline : dict
A LAS parsed line, i.e. a dict with keys "MNEM", "UNIT", "DATA"
and "DESC" which values are the respective LAS line parts.
linelayout : list, optional
A list containing 6 ints, each representing the number of
whitespaces in a portion of the LAS line. If not provided a minimal
layout will be used.
Returns
-------
line : str
A line composed using the `parsedline` parts and `linelayout`
whitespaces.
Examples
--------
>>> parsedline = {'DATA': '', 'DESC': 'MEASURED DEPTH',
'MNEM': 'DEPTH','UNIT': 'M'}
>>> layout = [2, 0, 7, 0, 1, 2]
>>> LASWriter._composeline(parsedline, linelayout)
' DEPTH.M : MEASURED DEPTH '
"""
if not linelayout:
linelayout = LASWriter.MINIMALLINELAYOUT
line = LASWriter.LASLINEPATTERN.format([" " * n for n in linelayout], **parsedline)
return line
@staticmethod
def _getspaces(style, spaces):
"""
Return the number of left and right whitespaces in a LAS line element.
Here LAS line element refers to either the MNEM, DATA or DESCRIPTION
part of a LAS line (the UNIT part cannot have whitespaces). The
distribution of whitespaces will be done according to `style`.
Parameters
----------
style : dict
A dictionary contaning the style parameters of a LAS line
element. The possible style parameters are 'allign',
'leftmargin' and 'rightmargin'. All of them are optional. If
'allign' is not provided, the other parameters are not used.
'allign' can be 'left', 'center' or 'right' and describes the
allignment of the LAS line element. 'leftmargin' and
'rightmargin' are the number of extra whitespaces to the left
or to the right of the line element, respectively.
spaces : int
Number of whitespaces to be distributed between `right` and
`left`.
Returns
-------
left : int
Number of whitespaces to the left of the LAS line element.
right : int
Number of whitespaces to the right of the LAS line element.
"""
if style.get('allign') == 'left':
left = style.get('leftmargin', 0)
right = style.get('rightmargin', 0) + spaces
elif style.get('allign') == 'center':
left = style.get('leftmargin', 0) + spaces // 2
right = style.get('rightmargin', 0) + (spaces + 1) // 2
elif style.get('allign') == 'right':
left = style.get('leftmargin', 0) + spaces
right = style.get('rightmargin', 0)
else:
left = style.get('leftmargin', 0)
right = style.get('rightmargin', 0)
return left, right
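    @staticmethod
    def _example_getspaces():
        # Worked example (illustrative, not part of the original class) of
        # the whitespace split above: centering puts the odd leftover space
        # on the right, and margins are added on top of the distribution.
        assert LASWriter._getspaces({'allign': 'center'}, 5) == (2, 3)
        assert LASWriter._getspaces({'allign': 'left', 'rightmargin': 1}, 5) == (0, 6)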
@staticmethod
def getprettyheaderlayout(header, mnemstyle=None, datastyle=None, descstyle=None, uniformsections=None):
"""
Obtain a 'pretty' header layout from a header and style parameters.
The layout will be constructed using the lenghts of the line elements
of each line in each section of the header.
Parameters
----------
header : OrderedDict
The LAS header for which the layout will be created.
mnemstyle : dict, optional
A dictionary containing the style parameters for the MNEM part of
the LAS line. The possible style parameters are 'allign',
'leftmargin' and 'rightmargin'. If not given, a default style will
be used.
datastyle : dict, optional
Same as `mnemstyle`, but for the DATA part instead.
descstyle : dict, optional
Same as `mnemstyle`, but for the DESCRIPTION part instead.
uniformsections : bool, optional
            If True, the line elements will have the same length across all
sections of the header.
Returns
-------
headerlayout : dict
The pretty header layout that will fit the provided header.
"""
if mnemstyle is None:
mnemstyle = LASWriter.DEFAULTMNEMSTYLE
if datastyle is None:
datastyle = LASWriter.DEFAULTDATASTYLE
if descstyle is None:
descstyle = LASWriter.DEFAULTDESCSTYLE
if uniformsections is None:
uniformsections = LASWriter.DEFAULTUNIFORMSECTIONS
style = {}
style["MNEM"] = mnemstyle
style["DATA"] = datastyle
style["DESC"] = descstyle
allignmnem = bool(style["MNEM"].get('allign'))
alligndata = bool(style["DATA"].get('allign'))
alligndesc = bool(style["DESC"].get('allign'))
sizearrays = {}
for sectionkey, section in header.items():
if isinstance(section, str) or not section:
continue
sizearray = {}
for key in ("MNEM", "UNIT", "DATA", "DESC"):
sizearray[key] = np.array([len(line[key]) for line in iter(section.values())])
sizearrays[sectionkey] = sizearray
usizearray = {}
for key in ("MNEM", "UNIT", "DATA", "DESC"):
            usizearray[key] = np.hstack([sizearray[key] for sizearray in iter(sizearrays.values())])
leftpositions = {}
for sectionkey, section in header.items():
if isinstance(section, str) or not section:
continue
leftposition = {}
sizearray = sizearrays[sectionkey]
if uniformsections:
msizearray = usizearray
else:
msizearray = sizearray
            mnemleft = np.zeros(len(section), dtype=int)
            if not alligndata:
                dataleft = np.zeros(len(section), dtype=int)
else:
if allignmnem:
size = sizearray['UNIT']
maxsize = np.max(msizearray['UNIT'])
else:
size = sizearray['MNEM'] + sizearray['UNIT']
maxsize = np.max(msizearray['MNEM'] + msizearray['UNIT'])
dataleft = maxsize - size
if not alligndesc or alligndata:
                descleft = np.zeros(len(section), dtype=int)
else:
if allignmnem:
size = sizearray['UNIT'] + sizearray['DATA']
maxsize = np.max(msizearray['UNIT'] + msizearray['DATA'])
else:
size = sizearray['MNEM'] + sizearray['UNIT'] + sizearray['DATA']
maxsize = np.max(msizearray['MNEM'] + msizearray['UNIT'] + msizearray['DATA'])
descleft = maxsize - size
leftposition["MNEM"] = mnemleft
leftposition["DATA"] = dataleft
leftposition["DESC"] = descleft
leftpositions[sectionkey] = leftposition
headerlayout = {}
for sectionkey, section in header.items():
if isinstance(section, str) or not section:
continue
sectionlayout = {}
if uniformsections:
msizearray = usizearray
else:
msizearray = sizearrays[sectionkey]
for i, line in enumerate(iter(section.values())):
linelayout = []
for key in ("MNEM", "DATA", "DESC"):
spaces = np.max(msizearray[key]) - len(line[key])
left, right = LASWriter._getspaces(style[key], spaces)
left += leftpositions[sectionkey][key][i]
linelayout.append(left)
linelayout.append(right)
sectionlayout[line["MNEM"]] = linelayout
headerlayout[sectionkey] = sectionlayout
return headerlayout
@staticmethod
def getemptyheader():
"""
Return an empty LAS header.
Returns
-------
emptyheader : OrderedDict
An empty LAS header.
"""
        pass  # TODO: implement
@staticmethod
def correctwellsection(header, depthdata, depthunit, copy=False):
"""
Correct the Well Info section of the header based on the depth data.
The correction consists basically in obtaining the correct STRT, STOP
and STEP parameters from the provided depth data.
Parameters
----------
header : OrderedDict
The LAS header which Well Info section will be corrected.
depthdata : numpy.ndarray
The data of the depth "curve".
depthunit : str
The unit in which `depthdata` was measured.
copy : bool, optional
Whether the correction will be done in a copy of `header` or in
`header` itself.
Returns
-------
hdr : OrderedDict
A LAS header with corrected Well Info section.
"""
if copy:
hdr = header.copy()
else:
hdr = header
start = depthdata[0]
stop = depthdata[-1]
steps = depthdata[1:] - depthdata[:-1]
if np.equal(steps, steps[0]).all():
step = steps[0]
else:
step = 0
hdr["W"]["STRT"]["UNIT"] = depthunit
hdr["W"]["STRT"]["DATA"] = "{:g}".format(start)
hdr["W"]["STOP"]["UNIT"] = depthunit
hdr["W"]["STOP"]["DATA"] = "{:g}".format(stop)
hdr["W"]["STEP"]["UNIT"] = depthunit
hdr["W"]["STEP"]["DATA"] = "{:g}".format(step)
return hdr
@staticmethod
def correctcurvesection(header, mnems, units, keep=False, copy=False):
"""
Correct the Curve Info section of the header.
        After curves are created or deleted, the information existing in the
        header will likely no longer match the data. This method corrects the
        Curve Info section so that the header uses the provided mnems and
        units.
Parameters
----------
header : OrderedDict
The LAS header which Curve Info section will be corrected.
mnems : list
The mnems of the curves that will exist in the header Curve Info
section.
units : list
The units of the curves that will exist in the header Curve Info
section.
keep : bool, optional
Whether only the necessary parts of the header will be corrected
or it will be built from the ground up.
copy : bool, optional
Whether the correction will be done in a copy of `header` or in
`header` itself.
Returns
-------
hdr : OrderedDict
A LAS header with corrected Curve Info section.
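        Example
        -------
        A minimal sketch (assuming `header` already has a Curve Info
        section)::

            mnems = ['DEPT', 'GR']
            units = ['M', 'GAPI']
            hdr = LASWriter.correctcurvesection(header, mnems, units,
                                                copy=True)
            # hdr["C"] now holds exactly the DEPT and GR lines, in order.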
"""
if copy:
hdr = header.copy()
else:
hdr = header
if keep:
            # Operate on the Curve Info section, not on the header itself.
            toremove = [key for key in hdr["C"] if key not in mnems]
            for key in toremove:
                del hdr["C"][key]
for name, unit in zip(mnems, units):
if name in hdr["C"]:
hdr["C"][name]["UNIT"] = unit
hdr["C"][name] = hdr["C"].pop(name) # Para manter a mesma ordem de names
else:
hdr["C"][name] = {"MNEM": name, "UNIT": unit, "DATA": "", "DESC": ""}
else:
hdr["C"].clear()
for name, unit in zip(mnems, units):
hdr["C"][name] = {"MNEM": name, "UNIT": unit, "DATA": "", "DESC": ""}
return hdr
@staticmethod
def _headertostring(header, sectionnames=None, layout=None, comments=None):
"""
Convert the given LAS header to a string ready for writing into a file.
Parameters
----------
header : OrderedDict
The header to convert into a string.
        sectionnames : dict, optional
            A dictionary with the capitalized first letters of the section
            names as keys and the full section names as values. For example,
            ``sectionnames["V"] = "VERSION INFORMATION SECTION"``. If not
            provided, the section key prefixed with '~' will be used as the
            full section name.
layout : dict, optional
Similar to header, but instead of the lines themselves it contains
the layout of each line. If not given a minimal layout will be
used.
        comments : dict, optional
            A dictionary whose keys are the line numbers of the comment lines
            and whose values are the comment lines themselves. If not given,
            no comments will be placed in the header string.
Returns
-------
string : str
A string containing all the header lines separated by '\\n'.
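        Example
        -------
        Comment lines are keyed by their final position in the output
        (illustrative)::

            comments = {0: '# Generated by LASWriter'}
            string = LASWriter._headertostring(header, comments=comments)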
"""
lines = []
if not sectionnames:
sectionnames = {}
            for key in header.keys():
sectionnames[key] = '~' + key
if not layout:
layout = {}
            for key in header.keys():
                layout[key] = dict.fromkeys(header[key])
if not comments:
comments = {}
for sectionkey, section in header.items():
while len(lines) in comments:
lines.append(comments[len(lines)])
if not section:
continue
lines.append(sectionnames[sectionkey])
if isinstance(section, str):
for line in section.split('\n'):
while len(lines) in comments:
lines.append(comments[len(lines)])
lines.append(line)
else:
for key, line in section.items():
while len(lines) in comments:
lines.append(comments[len(lines)])
linelayout = layout[sectionkey][key]
lines.append(LASWriter._composeline(line, linelayout))
while len(lines) in comments:
lines.append(comments[len(lines)])
lines.append(sectionnames["A"])
string = '\n'.join(lines)
return string
@staticmethod
def _datatostring(data, nullvalue=-999.25, revertorder=False, wrap=False, allign='right', collumnwidth=10,
                      maxprecision=8, copy=False):  # TODO: review (align decimal points, etc.)
"""
Convert the given LAS data to a string ready for writing into a file.
        The data is formatted in columns, such that the first line of `data`
        will be placed in the first column, the second line in the second
        column and so on.
Parameters
----------
data : np.ndarray
The data to convert into a string.
        nullvalue : float, optional
            A value that will replace the np.nan in the returned string.
        revertorder : bool, optional
            Whether the order of the data samples will be reversed before
            formatting.
        wrap : bool, optional
            Whether the output should be wrapped. If True, each entry of
            the first line of `data` will be alone in its line and subsequent
            lines will be limited to 80 characters.
        allign : {'right', 'left'}, optional
            The alignment of numbers inside the columns.
        collumnwidth : int, optional
            The width of the columns in which `data` will be formatted.
        maxprecision : int, optional
            The maximum number of significant figures in which `data` will be
            formatted.
        copy : bool, optional
            Whether the data that will be used should be a copy of `data`.
Returns
-------
string : str
A string containing `data` formatted appropriately according to the
given parameters.
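        Example
        -------
        A minimal sketch::

            arr = np.array([[1.0, 2.0, 3.0], [10.0, 20.0, np.nan]])
            s = LASWriter._datatostring(arr, copy=True)
            # Three lines, one per sample; each line holds one value per
            # curve, and np.nan is rendered as -999.25.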
"""
if copy:
newdata = np.copy(data)
else:
newdata = data
if revertorder:
newdata = newdata[:, ::-1]
isnan = np.isnan(newdata)
newdata[isnan] = nullvalue
if allign == 'left':
allignsymbol = '<'
else:
allignsymbol = '>'
maxwidth = 80
pattern = '{{:{}{}.{}g}}'.format(allignsymbol, collumnwidth, maxprecision)
lines = []
if wrap:
ncolumns = (maxwidth + 1) // (collumnwidth + 1)
nrows = (newdata.shape[0] - 1) // ncolumns
nrest = (newdata.shape[0] - 1) % ncolumns
linepattern = ' '.join([pattern] * ncolumns)
restpattern = ' '.join([pattern] * nrest)
for line in newdata.T:
lines.append(pattern.format(line[0]))
for i in range(nrows):
lines.append(linepattern.format(*line[1 + i * ncolumns:1 + (i + 1) * ncolumns]))
                if nrest:  # avoid an empty trailing line when there is no remainder
                    lines.append(restpattern.format(*line[1 + nrows * ncolumns:]))
else:
linepattern = ' '.join([pattern] * newdata.shape[0])
for line in newdata.T:
lines.append(linepattern.format(*line))
string = '\n'.join(lines)
return string
def write(self):
"""
Write the file.
Notes
-----
        When creating a `LASWriter` object the file is not written immediately.
The `header` and `data` attributes must be defined before calling the
`write` method. The other attributes (`headersectionnames`,
`headerlayout` and `headercomments`) are optional.
No verification is done to guarantee that the header and data are
compatible (i.e. have the same number of curves and the same depth
range). There are two methods that can be used for this:
`correctwellsection` and `correctcurvesection`.
Calling this method may modify `header` and `data` attributes.
"""
headerstring = LASWriter._headertostring(self.header, self.headersectionnames, self.headerlayout,
self.headercomments)
nullvalue = float(self.header["W"]["NULL"]["DATA"])
stepvalue = float(self.header["W"]["STEP"]["DATA"])
revertorder = (stepvalue != nullvalue) and (stepvalue < 0)
wrap = self.header["V"]["WRAP"]["DATA"].upper().startswith("Y")
datastring = LASWriter._datatostring(self.data, nullvalue, revertorder, wrap)
fileobject = builtins.open(self.filename, 'w')
fileobject.write(headerstring)
fileobject.write('\n')
fileobject.write(datastring)
fileobject.close()
def open(name, mode='r'):
"""
Create a new LASFile instance.
Parameters
----------
name : str
The file name.
mode : {'r', 'w'}, optional
The mode in which the file will be opened. If 'r' a LASReader object
is created; if 'w' a LASWriter is created instead.
Returns
-------
lasfile : LASFile
        A LASFile object. The actual return type depends on `mode`.
Note
----
This function does not work the same way as the builtin `open` function
since the LASFile is not a file-like object, despite its name.
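    Example
    -------
    A minimal round-trip sketch (assuming `LASReader` fills its `header` and
    `data` attributes in the form that `LASWriter.write` consumes)::

        reader = open('input.las', 'r')    # hypothetical path
        writer = open('output.las', 'w')   # hypothetical path
        writer.header = reader.header
        writer.data = reader.data
        writer.write()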
"""
if mode == 'r':
lasfile = LASReader(name)
elif mode == 'w':
lasfile = LASWriter(name)
else:
lasfile = None
return lasfile
def verbose(v=True):
global _VERBOSE
_VERBOSE = v
|
{
"content_hash": "e33b99d838781887e4df505b7e7b8663",
"timestamp": "",
"source": "github",
"line_count": 1125,
"max_line_length": 120,
"avg_line_length": 35.63911111111111,
"alnum_prop": 0.5480371127849554,
"repo_name": "giruenf/GRIPy",
"id": "0992aa6c2a264d666088426004935724e1ade8a8",
"size": "40119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fileio/las.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1593931"
}
],
"symlink_target": ""
}
|
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import collections
import copy
import inspect
import logging
import os
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured # noqa
from django.core.urlresolvers import reverse
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import empty
from django.utils.functional import SimpleLazyObject # noqa
from django.utils.importlib import import_module # noqa
from django.utils.module_loading import module_has_submodule # noqa
from django.utils.translation import ugettext_lazy as _
import six
from horizon import conf
from horizon.decorators import _current_component # noqa
from horizon.decorators import require_auth # noqa
from horizon.decorators import require_perms # noqa
from horizon import loaders
# Name of the panel group for panels to be displayed without a group.
DEFAULT_PANEL_GROUP = 'default'
LOG = logging.getLogger(__name__)
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
pattern._callback = decorator(pattern.callback, *args, **kwargs)
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
# FIXME(lhcheng): We need to find a better way to cache the result.
# Rather than storing it in the session, we could leverage the Django
# cache. Currently, storing it in the session has been causing issues with
# the cookie backend, adding 1600+ bytes to the cookie size.
def access_cached(func):
def inner(self, context):
session = context['request'].session
try:
if session['allowed']['valid_for'] != session.get('token'):
raise KeyError()
except KeyError:
session['allowed'] = {"valid_for": session.get('token')}
key = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if key not in session['allowed']:
session['allowed'][key] = func(self, context)
session.modified = True
return session['allowed'][key]
return inner
class NotRegistered(Exception):
pass
@python_2_unicode_compatible
class HorizonComponent(object):
policy_rules = None
def __init__(self):
super(HorizonComponent, self).__init__()
if not self.slug:
raise ImproperlyConfigured('Every %s must have a slug.'
% self.__class__)
def __str__(self):
name = getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)
return name
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except ImportError:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = patterns('')
return urlpatterns
# FIXME(lhcheng): Removed the access_cached decorator for now until
# a better implementation has been figured out. This has been causing
# issue with cookie backend, adding 1600+ in the cookie size.
# @access_cached
def can_access(self, context):
"""Return whether the user has role based access to this component.
This method is not intended to be overridden.
The result of the method is stored in per-session cache.
"""
return self.allowed(context)
def allowed(self, context):
"""Checks if the user is allowed to access this component.
This method should be overridden to return the result of
any policy checks required for the user to access this component
when more complex checks are required.
"""
return self._can_access(context['request'])
def _can_access(self, request):
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
        # This check is an OR check, rather than the AND check that is the
        # default in the policy engine, so each rule is called individually.
if policy_check and self.policy_rules:
for rule in self.policy_rules:
if policy_check((rule,), request):
return True
return False
# default to allowed
return True
class Registry(object):
def __init__(self):
self._registry = {}
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('Subclasses of Registry must set a '
'"_registerable_class" property.')
def _register(self, cls):
"""Registers the given class.
If the specified class is already registered then it is ignored.
"""
if not inspect.isclass(cls):
raise ValueError('Only classes may be registered.')
elif not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% self._registerable_class.__name__)
if cls not in self._registry:
cls._registered_with = self
self._registry[cls] = cls()
return self._registry[cls]
def _unregister(self, cls):
"""Unregisters the given class.
If the specified class isn't registered, ``NotRegistered`` will
be raised.
"""
if not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be '
'unregistered.' % self._registerable_class)
if cls not in self._registry.keys():
raise NotRegistered('%s is not registered' % cls)
del self._registry[cls]
return True
def _registered(self, cls):
if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
found = self._registry.get(cls, None)
if found:
return found
else:
# Allow for fetching by slugs as well.
for registered in self._registry.values():
if registered.slug == cls:
return registered
class_name = self._registerable_class.__name__
if hasattr(self, "_registered_with"):
parent = self._registered_with._registerable_class.__name__
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered with %(parent)s "%(name)s".'
% {"type": class_name,
"slug": cls,
"parent": parent,
"name": self.slug})
else:
slug = getattr(cls, "slug", cls)
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered.' % {"type": class_name,
"slug": slug})
class Panel(HorizonComponent):
"""A base class for defining Horizon dashboard panels.
All Horizon dashboard panels should extend from this class. It provides
the appropriate hooks for automatically constructing URLconfs, and
providing permission-based access control.
.. attribute:: name
The name of the panel. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the panel. The slug is used as
a component of the URL path for the panel. Default: ``''``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any view associated with this panel. This attribute
is combined cumulatively with any permissions required on the
``Dashboard`` class with which it is registered.
.. attribute:: urls
Path to a URLconf of views for this panel using dotted Python
notation. If no value is specified, a file called ``urls.py``
living in the same package as the ``panel.py`` file is used.
Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this panel should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: index_url_name
The ``name`` argument for the URL pattern which corresponds to
the index view for this ``Panel``. This is the view that
:meth:`.Panel.get_absolute_url` will attempt to reverse.
.. staticmethod:: can_register
This optional static method can be used to specify conditions that
need to be satisfied to load this panel. Unlike ``permissions`` and
``allowed`` this method is intended to handle settings based
conditions rather than user based permission and policy checks.
The return value is boolean. If the method returns ``True``, then the
panel will be registered and available to user (if ``permissions`` and
``allowed`` runtime checks are also satisfied). If the method returns
``False``, then the panel will not be registered and will not be
available via normal navigation or direct URL access.
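    A minimal panel definition might look like this (names are
    illustrative)::

        class Instances(Panel):
            name = _("Instances")
            slug = 'instances'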
"""
name = ''
slug = ''
urls = None
nav = True
index_url_name = "index"
def __repr__(self):
return "<Panel: %s>" % self.slug
def get_absolute_url(self):
"""Returns the default URL for this panel.
The default URL is defined as the URL pattern with ``name="index"`` in
the URLconf for this panel.
"""
try:
return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
self.slug,
self.index_url_name))
except Exception as exc:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.info("Error reversing absolute URL for %s: %s" % (self, exc))
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, panel=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
@six.python_2_unicode_compatible
class PanelGroup(object):
"""A container for a set of :class:`~horizon.Panel` classes.
When iterated, it will yield each of the ``Panel`` instances it
contains.
.. attribute:: slug
A unique string to identify this panel group. Required.
.. attribute:: name
A user-friendly name which will be used as the group heading in
places such as the navigation. Default: ``None``.
.. attribute:: panels
A list of panel module names which should be contained within this
grouping.
"""
def __init__(self, dashboard, slug=None, name=None, panels=None):
self.dashboard = dashboard
self.slug = slug or getattr(self, "slug", DEFAULT_PANEL_GROUP)
self.name = name or getattr(self, "name", None)
# Our panels must be mutable so it can be extended by others.
self.panels = list(panels or getattr(self, "panels", []))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __str__(self):
return self.name
def __iter__(self):
panel_instances = []
for name in self.panels:
try:
panel_instances.append(self.dashboard.get_panel(name))
except NotRegistered as e:
LOG.debug(e)
return iter(panel_instances)
class Dashboard(Registry, HorizonComponent):
"""A base class for defining Horizon dashboards.
All Horizon dashboards should extend from this base class. It provides the
appropriate hooks for automatic discovery of :class:`~horizon.Panel`
modules, automatically constructing URLconfs, and providing
permission-based access control.
.. attribute:: name
The name of the dashboard. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the dashboard. The slug is used as
a component of the URL path for the dashboard. Default: ``''``.
.. attribute:: panels
The ``panels`` attribute can be either a flat list containing the name
of each panel **module** which should be loaded as part of this
dashboard, or a list of :class:`~horizon.PanelGroup` classes which
define groups of panels as in the following example::
class SystemPanels(horizon.PanelGroup):
slug = "syspanel"
name = _("System")
panels = ('overview', 'instances', ...)
class Syspanel(horizon.Dashboard):
panels = (SystemPanels,)
Automatically generated navigation will use the order of the
modules in this attribute.
Default: ``[]``.
.. warning::
The values for this attribute should not correspond to the
:attr:`~.Panel.name` attributes of the ``Panel`` classes.
They should be the names of the Python modules in which the
``panel.py`` files live. This is used for the automatic
loading and registration of ``Panel`` classes much like
Django's ``ModelAdmin`` machinery.
Panel modules must be listed in ``panels`` in order to be
discovered by the automatic registration mechanism.
.. attribute:: default_panel
The name of the panel which should be treated as the default
panel for the dashboard, i.e. when you visit the root URL
for this dashboard, that's the panel that is displayed.
Default: ``None``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any panel registered with this dashboard. This attribute
is combined cumulatively with any permissions required on individual
:class:`~horizon.Panel` classes.
.. attribute:: urls
Optional path to a URLconf of additional views for this dashboard
which are not connected to specific panels. Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this dashboard should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: public
Boolean value to determine whether this dashboard can be viewed
without being logged in. Defaults to ``False``.
"""
_registerable_class = Panel
name = ''
slug = ''
urls = None
panels = []
default_panel = None
nav = True
public = False
def __repr__(self):
return "<Dashboard: %s>" % self.slug
def __init__(self, *args, **kwargs):
super(Dashboard, self).__init__(*args, **kwargs)
self._panel_groups = None
def get_panel(self, panel):
"""Returns the specified :class:`~horizon.Panel` instance registered
with this dashboard.
"""
return self._registered(panel)
def get_panels(self):
"""Returns the :class:`~horizon.Panel` instances registered with this
dashboard in order, without any panel groupings.
"""
all_panels = []
panel_groups = self.get_panel_groups()
for panel_group in panel_groups.values():
all_panels.extend(panel_group)
return all_panels
def get_panel_group(self, slug):
"""Returns the specified :class:~horizon.PanelGroup
or None if not registered
"""
return self._panel_groups.get(slug)
def get_panel_groups(self):
registered = copy.copy(self._registry)
panel_groups = []
# Gather our known panels
if self._panel_groups is not None:
for panel_group in self._panel_groups.values():
for panel in panel_group:
registered.pop(panel.__class__)
panel_groups.append((panel_group.slug, panel_group))
# Deal with leftovers (such as add-on registrations)
if len(registered):
slugs = [panel.slug for panel in registered.values()]
new_group = PanelGroup(self,
slug="other",
name=_("Other"),
panels=slugs)
panel_groups.append((new_group.slug, new_group))
return collections.OrderedDict(panel_groups)
def get_absolute_url(self):
"""Returns the default URL for this dashboard.
The default URL is defined as the URL pattern with ``name="index"``
in the URLconf for the :class:`~horizon.Panel` specified by
:attr:`~horizon.Dashboard.default_panel`.
"""
try:
return self._registered(self.default_panel).get_absolute_url()
except Exception:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.exception("Error reversing absolute URL for %s." % self)
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
default_panel = None
# Add in each panel's views except for the default view.
for panel in self._registry.values():
if panel.slug == self.default_panel:
default_panel = panel
continue
url_slug = panel.slug.replace('.', '/')
urlpatterns += patterns('',
url(r'^%s/' % url_slug,
include(panel._decorated_urls)))
# Now the default view, which should come last
if not default_panel:
raise NotRegistered('The default panel "%s" is not registered.'
% self.default_panel)
urlpatterns += patterns('',
url(r'',
include(default_panel._decorated_urls)))
# Require login if not public.
if not self.public:
_decorate_urlconf(urlpatterns, require_auth)
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, dashboard=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
def _autodiscover(self):
"""Discovers panels to register from the current dashboard module."""
if getattr(self, "_autodiscover_complete", False):
return
panels_to_discover = []
panel_groups = []
# If we have a flat iterable of panel names, wrap it again so
# we have a consistent structure for the next step.
if all([isinstance(i, six.string_types) for i in self.panels]):
self.panels = [self.panels]
# Now iterate our panel sets.
default_created = False
for panel_set in self.panels:
# Instantiate PanelGroup classes.
if not isinstance(panel_set, collections.Iterable) and \
issubclass(panel_set, PanelGroup):
panel_group = panel_set(self)
# Check for nested tuples, and convert them to PanelGroups
elif not isinstance(panel_set, PanelGroup):
panel_group = PanelGroup(self, panels=panel_set)
# Put our results into their appropriate places
panels_to_discover.extend(panel_group.panels)
panel_groups.append((panel_group.slug, panel_group))
if panel_group.slug == DEFAULT_PANEL_GROUP:
default_created = True
# Plugin panels can be added to a default panel group. Make sure such a
# default group exists.
if not default_created:
default_group = PanelGroup(self)
panel_groups.insert(0, (default_group.slug, default_group))
self._panel_groups = collections.OrderedDict(panel_groups)
# Do the actual discovery
package = '.'.join(self.__module__.split('.')[:-1])
mod = import_module(package)
for panel in panels_to_discover:
try:
before_import_registry = copy.copy(self._registry)
import_module('.%s.panel' % panel, package)
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, panel):
raise
self._autodiscover_complete = True
@classmethod
def register(cls, panel):
"""Registers a :class:`~horizon.Panel` with this dashboard."""
panel_class = Horizon.register_panel(cls, panel)
# Support template loading from panel template directories.
panel_mod = import_module(panel.__module__)
panel_dir = os.path.dirname(panel_mod.__file__)
template_dir = os.path.join(panel_dir, "templates")
if os.path.exists(template_dir):
key = os.path.join(cls.slug, panel.slug)
loaders.panel_template_dirs[key] = template_dir
return panel_class
@classmethod
def unregister(cls, panel):
"""Unregisters a :class:`~horizon.Panel` from this dashboard."""
success = Horizon.unregister_panel(cls, panel)
if success:
# Remove the panel's template directory.
key = os.path.join(cls.slug, panel.slug)
if key in loaders.panel_template_dirs:
del loaders.panel_template_dirs[key]
return success
def allowed(self, context):
"""Checks for role based access for this dashboard.
        Checks for access to any panels in the dashboard and to the
        dashboard itself.
This method should be overridden to return the result of
any policy checks required for the user to access this dashboard
when more complex checks are required.
"""
# if the dashboard has policy rules, honor those above individual
# panels
if not self._can_access(context['request']):
return False
# check if access is allowed to a single panel,
# the default for each panel is True
for panel in self.get_panels():
if panel.can_access(context):
return True
return False
class Workflow(object):
pass
class LazyURLPattern(SimpleLazyObject):
def __iter__(self):
if self._wrapped is empty:
self._setup()
return iter(self._wrapped)
def __reversed__(self):
if self._wrapped is empty:
self._setup()
return reversed(self._wrapped)
def __len__(self):
if self._wrapped is empty:
self._setup()
return len(self._wrapped)
def __getitem__(self, idx):
if self._wrapped is empty:
self._setup()
return self._wrapped[idx]
class Site(Registry, HorizonComponent):
"""The overarching class which encompasses all dashboards and panels."""
# Required for registry
_registerable_class = Dashboard
name = "Horizon"
namespace = 'horizon'
slug = 'horizon'
urls = 'horizon.site_urls'
def __repr__(self):
return u"<Site: %s>" % self.slug
@property
def _conf(self):
return conf.HORIZON_CONFIG
@property
def dashboards(self):
return self._conf['dashboards']
@property
def default_dashboard(self):
return self._conf['default_dashboard']
def register(self, dashboard):
"""Registers a :class:`~horizon.Dashboard` with Horizon."""
return self._register(dashboard)
def unregister(self, dashboard):
"""Unregisters a :class:`~horizon.Dashboard` from Horizon."""
return self._unregister(dashboard)
def registered(self, dashboard):
return self._registered(dashboard)
def register_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
return dash_instance._register(panel)
def unregister_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
if not dash_instance:
raise NotRegistered("The dashboard %s is not registered."
% dashboard)
return dash_instance._unregister(panel)
def get_dashboard(self, dashboard):
"""Returns the specified :class:`~horizon.Dashboard` instance."""
return self._registered(dashboard)
def get_dashboards(self):
"""Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
Orders dashboards according to the ``"dashboards"`` key in
``HORIZON_CONFIG`` or else returns all registered dashboards
in alphabetical order.
Any remaining :class:`~horizon.Dashboard` classes registered with
Horizon but not listed in ``HORIZON_CONFIG['dashboards']``
will be appended to the end of the list alphabetically.
"""
if self.dashboards:
registered = copy.copy(self._registry)
dashboards = []
for item in self.dashboards:
dashboard = self._registered(item)
dashboards.append(dashboard)
registered.pop(dashboard.__class__)
if len(registered):
extra = sorted(registered.values())
dashboards.extend(extra)
return dashboards
else:
return sorted(self._registry.values())
def get_default_dashboard(self):
"""Returns the default :class:`~horizon.Dashboard` instance.
If ``"default_dashboard"`` is specified in ``HORIZON_CONFIG``
then that dashboard will be returned. If not, the first dashboard
returned by :func:`~horizon.get_dashboards` will be returned.
"""
if self.default_dashboard:
return self._registered(self.default_dashboard)
elif len(self._registry):
return self.get_dashboards()[0]
else:
raise NotRegistered("No dashboard modules have been registered.")
def get_user_home(self, user):
"""Returns the default URL for a particular user.
This method can be used to customize where a user is sent when
they log in, etc. By default it returns the value of
:meth:`get_absolute_url`.
An alternative function can be supplied to customize this behavior
by specifying a either a URL or a function which returns a URL via
the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these
would be valid::
{"user_home": "/home",} # A URL
{"user_home": "my_module.get_user_home",} # Path to a function
{"user_home": lambda user: "/" + user.name,} # A function
{"user_home": None,} # Will always return the default dashboard
This can be useful if the default dashboard may not be accessible
to all users. When user_home is missing from HORIZON_CONFIG,
it will default to the settings.LOGIN_REDIRECT_URL value.
"""
user_home = self._conf['user_home']
if user_home:
if callable(user_home):
return user_home(user)
elif isinstance(user_home, six.string_types):
# Assume we've got a URL if there's a slash in it
if '/' in user_home:
return user_home
else:
mod, func = user_home.rsplit(".", 1)
return getattr(import_module(mod), func)(user)
# If it's not callable and not a string, it's wrong.
raise ValueError('The user_home setting must be either a string '
'or a callable object (e.g. a function).')
else:
return self.get_absolute_url()
def get_absolute_url(self):
"""Returns the default URL for Horizon's URLconf.
The default URL is determined by calling
:meth:`~horizon.Dashboard.get_absolute_url`
on the :class:`~horizon.Dashboard` instance returned by
:meth:`~horizon.get_default_dashboard`.
"""
return self.get_default_dashboard().get_absolute_url()
@property
def _lazy_urls(self):
"""Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug
def _urls(self):
"""Constructs the URLconf for Horizon from registered Dashboards."""
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Discover each dashboard's panels.
for dash in self._registry.values():
dash._autodiscover()
# Load the plugin-based panel configuration
self._load_panel_customization()
# Allow for override modules
if self._conf.get("customization_module", None):
customization_module = self._conf["customization_module"]
bits = customization_module.split('.')
mod_name = bits.pop()
package = '.'.join(bits)
mod = import_module(package)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (package, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# Compile the dynamic urlconf.
for dash in self._registry.values():
urlpatterns += patterns('',
url(r'^%s/' % dash.slug,
include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.namespace, self.slug
def _autodiscover(self):
"""Discovers modules to register from ``settings.INSTALLED_APPS``.
This makes sure that the appropriate modules get imported to register
themselves with Horizon.
"""
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('You must set a '
'"_registerable_class" property '
'in order to use autodiscovery.')
# Discover both dashboards and panels, in that order
for mod_name in ('dashboard', 'panel'):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (app, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
def _load_panel_customization(self):
"""Applies the plugin-based panel configurations.
        This method parses the panel customization from ``HORIZON_CONFIG``
        and makes changes to the dashboard accordingly.
        It supports adding, removing and setting default panels on the
        dashboard. It also supports registering a panel group.
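        One customization entry might look like this (illustrative values)::

            {'PANEL': 'mypanel',
             'PANEL_DASHBOARD': 'mydashboard',
             'PANEL_GROUP': 'mygroup',
             'ADD_PANEL': 'myplugin.panel.MyPanel'}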
"""
panel_customization = self._conf.get("panel_customization", [])
# Process all the panel groups first so that they exist before panels
# are added to them and Dashboard._autodiscover() doesn't wipe out any
# panels previously added when its panel groups are instantiated.
panel_configs = []
for config in panel_customization:
if config.get('PANEL'):
panel_configs.append(config)
elif config.get('PANEL_GROUP'):
self._process_panel_group_configuration(config)
else:
LOG.warning("Skipping %s because it doesn't have PANEL or "
"PANEL_GROUP defined.", config.__name__)
# Now process the panels.
for config in panel_configs:
self._process_panel_configuration(config)
def _process_panel_configuration(self, config):
"""Add, remove and set default panels on the dashboard."""
try:
dashboard = config.get('PANEL_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_DASHBOARD defined.", config.__name__)
return
panel_slug = config.get('PANEL')
dashboard_cls = self.get_dashboard(dashboard)
panel_group = config.get('PANEL_GROUP')
default_panel = config.get('DEFAULT_PANEL')
# Set the default panel
if default_panel:
dashboard_cls.default_panel = default_panel
# Remove the panel
if config.get('REMOVE_PANEL', False):
for panel in dashboard_cls.get_panels():
if panel_slug == panel.slug:
dashboard_cls.unregister(panel.__class__)
elif config.get('ADD_PANEL', None):
# Add the panel to the dashboard
panel_path = config['ADD_PANEL']
mod_path, panel_cls = panel_path.rsplit(".", 1)
try:
mod = import_module(mod_path)
except ImportError:
LOG.warning("Could not load panel: %s", mod_path)
return
panel = getattr(mod, panel_cls)
                # test if the can_register method is present and, if so, call
                # it to determine whether the panel should be loaded
if hasattr(panel, 'can_register') and \
callable(getattr(panel, 'can_register')):
if not panel.can_register():
LOG.debug("Load condition failed for panel: %(panel)s",
{'panel': panel_slug})
return
dashboard_cls.register(panel)
if panel_group:
dashboard_cls.get_panel_group(panel_group).\
panels.append(panel.slug)
else:
panels = list(dashboard_cls.panels)
panels.append(panel)
dashboard_cls.panels = tuple(panels)
except Exception as e:
LOG.warning('Could not process panel %(panel)s: %(exc)s',
{'panel': panel_slug, 'exc': e})
def _process_panel_group_configuration(self, config):
"""Adds a panel group to the dashboard."""
panel_group_slug = config.get('PANEL_GROUP')
try:
dashboard = config.get('PANEL_GROUP_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_DASHBOARD defined.", config.__name__)
return
dashboard_cls = self.get_dashboard(dashboard)
panel_group_name = config.get('PANEL_GROUP_NAME')
if not panel_group_name:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_NAME defined.", config.__name__)
return
# Create the panel group class
panel_group = type(panel_group_slug,
(PanelGroup, ),
{'slug': panel_group_slug,
'name': panel_group_name,
'panels': []},)
# Add the panel group to dashboard
panels = list(dashboard_cls.panels)
panels.append(panel_group)
dashboard_cls.panels = tuple(panels)
# Trigger the autodiscovery to completely load the new panel group
dashboard_cls._autodiscover_complete = False
dashboard_cls._autodiscover()
except Exception as e:
LOG.warning('Could not process panel group %(panel_group)s: '
'%(exc)s',
{'panel_group': panel_group_slug, 'exc': e})
class HorizonSite(Site):
"""A singleton implementation of Site such that all dealings with horizon
get the same instance no matter what. There can be only one.
"""
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Site, cls).__new__(cls, *args, **kwargs)
return cls._instance
# The one true Horizon
Horizon = HorizonSite()
|
{
"content_hash": "114db7171407634c978adab823180344",
"timestamp": "",
"source": "github",
"line_count": 1000,
"max_line_length": 79,
"avg_line_length": 38.428,
"alnum_prop": 0.5918601020089518,
"repo_name": "Tesora/tesora-horizon",
"id": "04123afdb0b1cdfe9a627d9fcacaf87e8d6ea2dd",
"size": "39033",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "horizon/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "106985"
},
{
"name": "HTML",
"bytes": "482184"
},
{
"name": "JavaScript",
"bytes": "1106973"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4828758"
},
{
"name": "Shell",
"bytes": "30969"
}
],
"symlink_target": ""
}
|
from plenum.common.constants import TRUSTEE, STEWARD
from indy_common.auth import Authoriser
from indy_common.constants import VALIDATOR_INFO
def test_permission_for_validator_info(role):
authorized = role in (TRUSTEE, STEWARD)
assert authorized == Authoriser.authorised(typ=VALIDATOR_INFO,
actorRole=role)[0]
|
{
"content_hash": "3948296d540b26035e35ef53cbab9430",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 66,
"avg_line_length": 36.8,
"alnum_prop": 0.6902173913043478,
"repo_name": "spivachuk/sovrin-node",
"id": "6e2be97bc1051794d9666956d21c804b151bf0e4",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indy_common/test/auth/test_auth_validator_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3329"
},
{
"name": "Dockerfile",
"bytes": "7269"
},
{
"name": "Groovy",
"bytes": "8984"
},
{
"name": "Makefile",
"bytes": "11151"
},
{
"name": "Python",
"bytes": "1681637"
},
{
"name": "Ruby",
"bytes": "65393"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "132633"
}
],
"symlink_target": ""
}
|
from src.base.test_cases import TestCases
class PerfectSquaresTestCases(TestCases):
def __init__(self):
super(PerfectSquaresTestCases, self).__init__()
self.__add_test_case__("Example 1", 12, 3)
self.__add_test_case__("Example 2", 13, 2)
|
{
"content_hash": "00e58a5d5829ea1f90fb895d2c4758e7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 33.375,
"alnum_prop": 0.6404494382022472,
"repo_name": "hychrisli/PyAlgorithms",
"id": "54310ea3fb5e155e275df928f27e9929de553326",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/part1/q279_test_perfect_squares.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "201747"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class FailoverGroupReadOnlyEndpoint(Model):
"""Read-only endpoint of the failover group instance.
:param failover_policy: Failover policy of the read-only endpoint for the
failover group. Possible values include: 'Disabled', 'Enabled'
:type failover_policy: str or
~azure.mgmt.sql.models.ReadOnlyEndpointFailoverPolicy
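
    Example (illustrative)::

        endpoint = FailoverGroupReadOnlyEndpoint(failover_policy='Enabled')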
"""
_attribute_map = {
'failover_policy': {'key': 'failoverPolicy', 'type': 'str'},
}
def __init__(self, failover_policy=None):
super(FailoverGroupReadOnlyEndpoint, self).__init__()
self.failover_policy = failover_policy
|
{
"content_hash": "5bcd9ff0e17795523b6c167025ff162b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 34,
"alnum_prop": 0.6934984520123839,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "be2fed191312a8ca68215c3877be041c3394c5a9",
"size": "1120",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-sql/azure/mgmt/sql/models/failover_group_read_only_endpoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
"""HTTP server classes.
Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see
SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST,
and CGIHTTPRequestHandler for CGI scripts.
It does, however, optionally implement HTTP/1.1 persistent connections,
as of version 0.3.
Notes on CGIHTTPRequestHandler
------------------------------
This class implements GET and POST requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
subprocess.Popen() is used as a fallback, with slightly altered semantics.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.6"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import html
import http.client
import io
import mimetypes
import os
import posixpath
import select
import shutil
import socket # For gethostbyaddr()
import socketserver
import sys
import time
import urllib.parse
import copy
import argparse
# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code: %(code)d</p>
<p>Message: %(message)s.</p>
<p>Error code explanation: %(code)s - %(explain)s.</p>
</body>
</html>
"""
DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class HTTPServer(socketserver.TCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
socketserver.TCPServer.server_bind(self)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
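# Minimal usage sketch (assumes a request handler subclass that defines at
# least one do_* method; see BaseHTTPRequestHandler below):
#
#     httpd = HTTPServer(('', 8000), SomeHandler)
#     httpd.serve_forever()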
class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of email.message.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to HTTP 0.9, i.e. don't send a status line.
default_request_version = "HTTP/0.9"
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = 1
requestline = str(self.raw_requestline, 'iso-8859-1')
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
command, path, version = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
command, path = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive.
try:
self.headers = http.client.parse_headers(self.rfile,
_class=self.MessageClass)
except http.client.LineTooLong:
self.send_error(400, "Line too long")
return False
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
# Examine the headers and look for an Expect directive
expect = self.headers.get('Expect', "")
if (expect.lower() == "100-continue" and
self.protocol_version >= "HTTP/1.1" and
self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
return True
def handle_expect_100(self):
"""Decide what to do with an "Expect: 100-continue" header.
If the client is expecting a 100 Continue response, we must
respond with either a 100 Continue or a final response before
waiting for the request body. The default is to always respond
with a 100 Continue. You can behave differently (for example,
reject unauthorized requests) by overriding this method.
This method should either return True (possibly after sending
a 100 Continue response) or send an error response and return
False.
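        For example, an override that rejects unauthorized requests might
        look like this (a sketch)::

            def handle_expect_100(self):
                if self.headers.get('Authorization') is None:
                    self.send_error(401)
                    return False
                return BaseHTTPRequestHandler.handle_expect_100(self)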
"""
self.send_response_only(100)
self.end_headers()
return True
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
self.wfile.flush() #actually send the response if not already done.
except socket.timeout as e:
#a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", e)
self.close_connection = 1
return
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None, explain=None):
"""Send and log an error reply.
Arguments are
* code: an HTTP error code
3 digits
* message: a simple optional 1 line reason phrase.
*( HTAB / SP / VCHAR / %x80-FF )
defaults to short entry matching the response code
* explain: a detailed message defaults to the long entry
matching the response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
shortmsg, longmsg = self.responses[code]
except KeyError:
shortmsg, longmsg = '???', '???'
if message is None:
message = shortmsg
if explain is None:
explain = longmsg
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': _quote_html(explain)})
body = content.encode('UTF-8', 'replace')
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header('Connection', 'close')
self.send_header('Content-Length', int(len(body)))
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(body)
def send_response(self, code, message=None):
"""Add the response header to the headers buffer and log the
response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
self.send_response_only(code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_response_only(self, code, message=None):
"""Send the response header only."""
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(("%s %d %s\r\n" %
(self.protocol_version, code, message)).encode(
'latin-1', 'strict'))
def send_header(self, keyword, value):
"""Send a MIME header to the headers buffer."""
if self.request_version != 'HTTP/0.9':
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(
("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict'))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self._headers_buffer.append(b"\r\n")
self.flush_headers()
def flush_headers(self):
if hasattr(self, '_headers_buffer'):
self.wfile.write(b"".join(self._headers_buffer))
self._headers_buffer = []
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, format, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(format, *args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client ip and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address."""
return self.client_address[0]
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# MessageClass used to parse headers
MessageClass = http.client.HTTPMessage
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616 and 6585.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this resource.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
428: ('Precondition Required',
'The origin server requires the request to be conditional.'),
429: ('Too Many Requests', 'The user has sent too many requests '
'in a given amount of time ("rate limiting").'),
431: ('Request Header Fields Too Large', 'The server is unwilling to '
'process the request because its header fields are too large.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
511: ('Network Authentication Required',
'The client needs to authenticate to gain network access.'),
}
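# Editor's sketch (hypothetical subclass, not part of this module): the
# do_<COMMAND> dispatch performed in handle_one_request() means a concrete
# handler only needs to define do_GET / do_POST methods and use the helpers
# defined above to build its reply.
class _HelloHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        body = b"hello\n"
        self.send_response(200)
        self.send_header("Content-Type", "text/plain")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)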
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
try:
self.copyfile(f, self.wfile)
finally:
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
parts = urllib.parse.urlsplit(self.path)
if not parts.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
new_parts = (parts[0], parts[1], parts[2] + '/',
parts[3], parts[4])
new_url = urllib.parse.urlunsplit(new_parts)
self.send_header("Location", new_url)
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except OSError:
self.send_error(404, "File not found")
return None
try:
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
except:
f.close()
raise
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except OSError:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
r = []
try:
displaypath = urllib.parse.unquote(self.path,
errors='surrogatepass')
except UnicodeDecodeError:
displaypath = urllib.parse.unquote(path)
displaypath = html.escape(displaypath)
enc = sys.getfilesystemencoding()
title = 'Directory listing for %s' % displaypath
r.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd">')
r.append('<html>\n<head>')
r.append('<meta http-equiv="Content-Type" '
'content="text/html; charset=%s">' % enc)
r.append('<title>%s</title>\n</head>' % title)
r.append('<body>\n<h1>%s</h1>' % title)
r.append('<hr>\n<ul>')
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
r.append('<li><a href="%s">%s</a></li>'
% (urllib.parse.quote(linkname,
errors='surrogatepass'),
html.escape(displayname)))
r.append('</ul>\n<hr>\n</body>\n</html>\n')
encoded = '\n'.join(r).encode(enc, 'surrogateescape')
f = io.BytesIO()
f.write(encoded)
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html; charset=%s" % enc)
self.send_header("Content-Length", str(len(encoded)))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/')
try:
path = urllib.parse.unquote(path, errors='surrogatepass')
except UnicodeDecodeError:
path = urllib.parse.unquote(path)
path = posixpath.normpath(path)
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
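# Editor's sketch (hypothetical subclass): guess_type() consults the class-level
# extensions_map, so extra MIME types can be registered by shadowing that table
# in a subclass; '.json' is just an illustrative choice.
class _JSONAwareHandler(SimpleHTTPRequestHandler):
    extensions_map = SimpleHTTPRequestHandler.extensions_map.copy()
    extensions_map['.json'] = 'application/json'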
# Utilities for CGIHTTPRequestHandler
def _url_collapse_path(path):
"""
Given a URL path, remove extra '/'s and '.' path elements and collapse
any '..' references, returning a collapsed path.
Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
The utility of this function is limited to the is_cgi method and helps
prevent some security attacks.
Returns: The reconstituted path, which will always start with a '/'.
Raises: IndexError if too many '..' occur within the path.
"""
# Similar to os.path.split(os.path.normpath(path)) but specific to URL
# path semantics rather than local operating system semantics.
path_parts = path.split('/')
head_parts = []
for part in path_parts[:-1]:
if part == '..':
head_parts.pop() # IndexError if more '..' than prior parts
elif part and part != '.':
head_parts.append( part )
if path_parts:
tail_part = path_parts.pop()
if tail_part:
if tail_part == '..':
head_parts.pop()
tail_part = ''
elif tail_part == '.':
tail_part = ''
else:
tail_part = ''
splitpath = ('/' + '/'.join(head_parts), tail_part)
collapsed_path = "/".join(splitpath)
return collapsed_path
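# Editor's sketch (never called anywhere): hand-checked expectations that
# illustrate the collapsing rules documented in _url_collapse_path above.
def _url_collapse_path_examples():
    assert _url_collapse_path('/a/b/../c') == '/a/c'
    assert _url_collapse_path('//a//./b') == '/a/b'
    assert _url_collapse_path('/cgi-bin/script.py') == '/cgi-bin/script.py'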
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(x[2] for x in pwd.getpwall())
return nobody
def executable(path):
"""Test for executable file."""
return os.access(path, os.X_OK)
class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
Returns True and updates the cgi_info attribute to the tuple
(dir, rest) if self.path requires running a CGI script.
Returns False otherwise.
If any exception is raised, the caller should assume that
self.path was rejected as invalid and act accordingly.
The default implementation tests whether the normalized url
path begins with one of the strings in self.cgi_directories
(and the next character is a '/' or the end of the string).
"""
collapsed_path = _url_collapse_path(urllib.parse.unquote(self.path))
dir_sep = collapsed_path.find('/', 1)
head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
if head in self.cgi_directories:
self.cgi_info = head, tail
return True
return False
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
dir, rest = self.cgi_info
path = dir + '/' + rest
i = path.find('/', len(dir)+1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir)+1)
else:
break
# find an explicit query string, if present.
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
# dissect the part after the directory name into a script name &
# a possible additional path, to be stored in PATH_INFO.
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if self.have_fork or not ispy:
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = copy.deepcopy(os.environ)
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.parse.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.get("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = authorization[1].encode('ascii')
authorization = base64.decodebytes(authorization).\
decode('ascii')
except (binascii.Error, UnicodeError):
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.get('content-type') is None:
env['CONTENT_TYPE'] = self.headers.get_content_type()
else:
env['CONTENT_TYPE'] = self.headers['content-type']
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.get('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.get('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.get_all('cookie', []))
cookie_str = ', '.join(co)
if cookie_str:
env['HTTP_COOKIE'] = cookie_str
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
self.send_response(200, "Script output follows")
self.flush_headers()
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except OSError:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, env)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
else:
# Non-Unix -- use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = [interp, '-u'] + cmdline
if '=' not in query:
cmdline.append(query)
self.log_message("command: %s", subprocess.list2cmdline(cmdline))
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
p = subprocess.Popen(cmdline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env = env
)
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
stdout, stderr = p.communicate(data)
self.wfile.write(stdout)
if stderr:
self.log_error('%s', stderr)
p.stderr.close()
p.stdout.close()
status = p.returncode
if status:
self.log_error("CGI script exit status %#x", status)
else:
self.log_message("CGI script exited OK")
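# Editor's sketch (hypothetical subclass): is_cgi() matches the first path
# component against cgi_directories, so serving scripts from a different
# directory only requires overriding that list.
class _ScriptsCGIHandler(CGIHTTPRequestHandler):
    cgi_directories = ['/scripts']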
def test(HandlerClass=BaseHTTPRequestHandler,
ServerClass=HTTPServer, protocol="HTTP/1.0", port=8000, bind=""):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
server_address = (bind, port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
httpd.server_close()
sys.exit(0)
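# Editor's sketch (assumption, not part of the module's public API): serving a
# single directory programmatically with the classes defined above; the helper
# name and default port are illustrative only.
def _serve_directory(directory, port=8080):
    os.chdir(directory)        # SimpleHTTPRequestHandler serves os.getcwd()
    httpd = HTTPServer(("", port), SimpleHTTPRequestHandler)
    try:
        httpd.serve_forever()  # blocks until interrupted
    finally:
        httpd.server_close()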
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cgi', action='store_true',
help='Run as CGI Server')
parser.add_argument('--bind', '-b', default='', metavar='ADDRESS',
help='Specify alternate bind address '
'[default: all interfaces]')
parser.add_argument('port', action='store',
default=8000, type=int,
nargs='?',
help='Specify alternate port [default: 8000]')
args = parser.parse_args()
if args.cgi:
handler_class = CGIHTTPRequestHandler
else:
handler_class = SimpleHTTPRequestHandler
test(HandlerClass=handler_class, port=args.port, bind=args.bind)
|
{
"content_hash": "e9d82ee43617a36966a16dba5c240577",
"timestamp": "",
"source": "github",
"line_count": 1245,
"max_line_length": 100,
"avg_line_length": 37.24096385542169,
"alnum_prop": 0.575520327833495,
"repo_name": "Bjay1435/capstone",
"id": "cfa29f44d351cfd3ac6e8ea05d18b6d43c148e2b",
"size": "46365",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "rootfs/usr/lib/python3.4/http/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "1C Enterprise",
"bytes": "841"
},
{
"name": "Awk",
"bytes": "10779"
},
{
"name": "Batchfile",
"bytes": "35708"
},
{
"name": "C",
"bytes": "12874998"
},
{
"name": "C++",
"bytes": "8412918"
},
{
"name": "CMake",
"bytes": "2014897"
},
{
"name": "CSS",
"bytes": "17738"
},
{
"name": "Coq",
"bytes": "29047"
},
{
"name": "Emacs Lisp",
"bytes": "28540"
},
{
"name": "FORTRAN",
"bytes": "1781"
},
{
"name": "Forth",
"bytes": "2078"
},
{
"name": "Go",
"bytes": "15064"
},
{
"name": "Groff",
"bytes": "325029"
},
{
"name": "HTML",
"bytes": "3226229"
},
{
"name": "JavaScript",
"bytes": "83945"
},
{
"name": "Logos",
"bytes": "116581"
},
{
"name": "M4",
"bytes": "1138587"
},
{
"name": "Makefile",
"bytes": "60298"
},
{
"name": "Matlab",
"bytes": "2038"
},
{
"name": "Objective-C",
"bytes": "11673"
},
{
"name": "Pascal",
"bytes": "37459"
},
{
"name": "Perl",
"bytes": "18475849"
},
{
"name": "Perl6",
"bytes": "1084056"
},
{
"name": "Prolog",
"bytes": "1416519"
},
{
"name": "Python",
"bytes": "13249253"
},
{
"name": "Shell",
"bytes": "5189424"
},
{
"name": "Smalltalk",
"bytes": "21514"
},
{
"name": "Stata",
"bytes": "13166"
},
{
"name": "SystemVerilog",
"bytes": "213462"
},
{
"name": "Tcl",
"bytes": "134338"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "VHDL",
"bytes": "57795938"
},
{
"name": "Verilog",
"bytes": "8965533"
},
{
"name": "VimL",
"bytes": "12589"
},
{
"name": "XC",
"bytes": "17727"
},
{
"name": "XS",
"bytes": "17577"
},
{
"name": "Yacc",
"bytes": "7489"
}
],
"symlink_target": ""
}
|
"""
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example shows characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. With the exception of the last dataset,
the parameters of each of these dataset-algorithm pairs
have been tuned to produce good clustering results. Some
algorithms are more sensitive to parameter values than
others.
The last dataset is an example of a 'null' situation for
clustering: the data is homogeneous, and there is no good
clustering. For this example, the null dataset uses the
same parameters as the dataset in the row above it, which
represents a mismatch in the parameter values and the
data structure.
While these examples give some intuition about the
algorithms, this intuition might not apply to very high
dimensional data.
"""
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets, mixture
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from itertools import cycle, islice
np.random.seed(0)
# ============
# Generate datasets. We choose a size large enough to see the scalability
# of the algorithms, but not so large that running times become excessive.
# ============
n_samples = 500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
# Anisotropically distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(
n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state
)
# ============
# Set up cluster parameters
# ============
plt.figure(figsize=(9 * 2 + 3, 13))
plt.subplots_adjust(
left=0.02, right=0.98, bottom=0.001, top=0.95, wspace=0.05, hspace=0.01
)
plot_num = 1
default_base = {
"quantile": 0.3,
"eps": 0.3,
"damping": 0.9,
"preference": -200,
"n_neighbors": 3,
"n_clusters": 3,
"min_samples": 7,
"xi": 0.05,
"min_cluster_size": 0.1,
}
datasets = [
(
noisy_circles,
{
"damping": 0.77,
"preference": -240,
"quantile": 0.2,
"n_clusters": 2,
"min_samples": 7,
"xi": 0.08,
},
),
(
noisy_moons,
{
"damping": 0.75,
"preference": -220,
"n_clusters": 2,
"min_samples": 7,
"xi": 0.1,
},
),
(
varied,
{
"eps": 0.18,
"n_neighbors": 2,
"min_samples": 7,
"xi": 0.01,
"min_cluster_size": 0.2,
},
),
(
aniso,
{
"eps": 0.15,
"n_neighbors": 2,
"min_samples": 7,
"xi": 0.1,
"min_cluster_size": 0.2,
},
),
(blobs, {"min_samples": 7, "xi": 0.1, "min_cluster_size": 0.2}),
(no_structure, {}),
]
for i_dataset, (dataset, algo_params) in enumerate(datasets):
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=params["quantile"])
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(
X, n_neighbors=params["n_neighbors"], include_self=False
)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# ============
# Create cluster objects
# ============
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params["n_clusters"], n_init="auto")
ward = cluster.AgglomerativeClustering(
n_clusters=params["n_clusters"], linkage="ward", connectivity=connectivity
)
spectral = cluster.SpectralClustering(
n_clusters=params["n_clusters"],
eigen_solver="arpack",
affinity="nearest_neighbors",
)
dbscan = cluster.DBSCAN(eps=params["eps"])
optics = cluster.OPTICS(
min_samples=params["min_samples"],
xi=params["xi"],
min_cluster_size=params["min_cluster_size"],
)
affinity_propagation = cluster.AffinityPropagation(
damping=params["damping"], preference=params["preference"], random_state=0
)
average_linkage = cluster.AgglomerativeClustering(
linkage="average",
metric="cityblock",
n_clusters=params["n_clusters"],
connectivity=connectivity,
)
birch = cluster.Birch(n_clusters=params["n_clusters"])
gmm = mixture.GaussianMixture(
n_components=params["n_clusters"], covariance_type="full"
)
clustering_algorithms = (
("MiniBatch\nKMeans", two_means),
("Affinity\nPropagation", affinity_propagation),
("MeanShift", ms),
("Spectral\nClustering", spectral),
("Ward", ward),
("Agglomerative\nClustering", average_linkage),
("DBSCAN", dbscan),
("OPTICS", optics),
("BIRCH", birch),
("Gaussian\nMixture", gmm),
)
for name, algorithm in clustering_algorithms:
t0 = time.time()
# catch warnings related to kneighbors_graph
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the "
+ "connectivity matrix is [0-9]{1,2}"
+ " > 1. Completing it to avoid stopping the tree early.",
category=UserWarning,
)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding"
+ " may not work as expected.",
category=UserWarning,
)
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, "labels_"):
y_pred = algorithm.labels_.astype(int)
else:
y_pred = algorithm.predict(X)
plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
colors = np.array(
list(
islice(
cycle(
[
"#377eb8",
"#ff7f00",
"#4daf4a",
"#f781bf",
"#a65628",
"#984ea3",
"#999999",
"#e41a1c",
"#dede00",
]
),
int(max(y_pred) + 1),
)
)
)
# add black color for outliers (if any)
colors = np.append(colors, ["#000000"])
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred])
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
plt.text(
0.99,
0.01,
("%.2fs" % (t1 - t0)).lstrip("0"),
transform=plt.gca().transAxes,
size=15,
horizontalalignment="right",
)
plot_num += 1
plt.show()
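# Editor's follow-up sketch: scoring one algorithm/dataset pair numerically with
# the silhouette coefficient instead of relying on visual inspection; the eps
# value below is an illustrative assumption, not a tuned parameter.
from sklearn import metrics

X_demo, _ = noisy_moons
X_demo = StandardScaler().fit_transform(X_demo)
labels_demo = cluster.DBSCAN(eps=0.3).fit_predict(X_demo)
if len(set(labels_demo)) > 1:  # silhouette needs at least two distinct labels
    print("silhouette score:", metrics.silhouette_score(X_demo, labels_demo))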
|
{
"content_hash": "8995162ebf50b69b9c20f02201375541",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 87,
"avg_line_length": 29.810606060606062,
"alnum_prop": 0.5515883100381195,
"repo_name": "betatim/scikit-learn",
"id": "c27c1f2d45ce3a6edf8c8988ca9f4f1afd3e5b54",
"size": "7870",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "examples/cluster/plot_cluster_comparison.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668499"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10504881"
},
{
"name": "Shell",
"bytes": "41551"
}
],
"symlink_target": ""
}
|
import sys
#sys.path.insert(0, "../")
#from pyes import ES
from pyes.es import ES
from datetime import datetime
import shelve
conn = ES('127.0.0.1:9500')
#conn = ES('192.168.2.50:9200')
try:
conn.delete_index("test-index")
except:
pass
dataset = shelve.open("samples.shelve")
mapping = {u'description': {'boost': 1.0,
'index': 'analyzed',
'store': 'true',
'type': u'string',
"term_vector": "with_positions_offsets"
},
u'name': {'boost': 1.0,
'index': 'analyzed',
'store': 'true',
'type': u'string',
"term_vector": "with_positions_offsets"
},
u'age': {'store': 'true',
'type': u'integer'},
}
conn.create_index("test-index")
conn.put_mapping("test-type", {'properties': mapping}, ["test-index"])
start = datetime.now()
for k, userdata in dataset.items():
# conn.index(userdata, "test-index", "test-type", k)
conn.index(userdata, "test-index", "test-type", k, bulk=True)
conn.force_bulk()
end = datetime.now()
print "time:", end - start
dataset.close()
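# Editor's sketch (assumption): one way to build a "samples.shelve" file like
# the one read above using only the standard library, so the benchmark can be
# reproduced locally; field names mirror the mapping defined earlier.
def make_sample_shelve(path="samples.shelve", n=1000):
    db = shelve.open(path)
    try:
        for i in range(n):
            db[str(i)] = {"name": "user %d" % i,
                          "description": "sample user number %d" % i,
                          "age": 20 + (i % 50)}
    finally:
        db.close()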
|
{
"content_hash": "b3e3d1cba8daa3128d3177a77ced90ec",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 70,
"avg_line_length": 27.377777777777776,
"alnum_prop": 0.5146103896103896,
"repo_name": "aparo/pyes",
"id": "de4c1fc7ba917a0bcca9c1f7f0e3e59180d390be",
"size": "1232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "performance/performance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1143"
},
{
"name": "Python",
"bytes": "602961"
},
{
"name": "Shell",
"bytes": "1438"
}
],
"symlink_target": ""
}
|
from pbr import version
version_info = version.VersionInfo('driverlog')
|
{
"content_hash": "79105e08cab3d22800b7acb5f6adbafd",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 47,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.7945205479452054,
"repo_name": "tracyajones/driverlog",
"id": "19e8e5c80fc07c4b65fb0cd4fa25cbfd14736ade",
"size": "656",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "driverlog/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name = 'fastmc',
version = '1.8.0-alpha1',
description = 'Fast Minecraft Protocol Parser/Writer',
author = 'Florian Wesch',
author_email = 'fw@dividuum.de',
packages = ['fastmc'],
license = 'BSD2',
install_requires = ['requests', 'pycrypto', 'simplejson'],
zip_safe = True,
)
|
{
"content_hash": "1f7046c9d18a0534b04da629b61bf142",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 26.846153846153847,
"alnum_prop": 0.6246418338108882,
"repo_name": "dividuum/fastmc",
"id": "e3389705d29743c4c8c15003deb44eecf5a15cae",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "109828"
}
],
"symlink_target": ""
}
|
import sys
from . import parser
import wamcompiler
import waminterpreter
import factplugin
if __name__ == "__main__":
rules = parser.RuleCollection()
rules.add(parser.Parser().parseRule('p(X,Y) :- spouse(X,Y) {r}.'))
rules.listing()
wp = wamcompiler.Compiler().compileRules(rules)
wp.listing()
fp = factplugin.FactPlugin.load('../test/fam.cfacts')
wi = waminterpreter.Interpreter(wp,plugins=[fp])
print(wi.plugins)
query = parser.Parser().parseQuery('p(X,Y).')
print(query)
answers = waminterpreter.Util.answer(wi,query)
print(answers)
|
{
"content_hash": "e434f226a59040a1c0a94b3a6cfe4a8d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 29.3,
"alnum_prop": 0.6723549488054608,
"repo_name": "TeamCohen/TensorLog",
"id": "b5d2cde9222de6c2b222fa76c882f22ec7b16a69",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/working",
"path": "tensorlog/trywam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "6073"
},
{
"name": "Python",
"bytes": "526823"
},
{
"name": "Shell",
"bytes": "1979"
}
],
"symlink_target": ""
}
|
"""Convert a spreadsheet CAH from
http://boardgamegeek.com/thread/947849/hopefully-more-than-complete-listing-of-all-offici
https://docs.google.com/spreadsheet/ccc?key=0Ajv9fdKngBJ_dHFvZjBzZDBjTE16T3JwNC0tRlp6Wnc#gid=10
https://docs.google.com/spreadsheet/ccc?key=0Ajv9fdKngBJ_dHFvZjBzZDBjTE16T3JwNC0tRlp6Wnc&output=xls
mkdir data
cd data
wget 'https://docs.google.com/spreadsheet/ccc?key=0Ajv9fdKngBJ_dHFvZjBzZDBjTE16T3JwNC0tRlp6Wnc&output=xls' '--output-document=Cards Against Humanity versions.xlsx'
into a SQLite database and then into a Django JSON fixture.
"""
import os
import pprint
import sys
import sqlite3
import urllib2
import xls2db # from https://github.com/clach04/xls2db/
import xlrd
from card_fixturegen import DJANGO_CARDS_DATA_DIR
# json support, TODO consider http://pypi.python.org/pypi/omnijson
try:
# Python 2.6+
import json
except ImportError:
try:
# from http://code.google.com/p/simplejson
import simplejson as json
except ImportError:
json = None
if json is None:
def dump_json(x, indent=None):
"""dumb not safe!
Works for the purposes of this specific script as quotes never
appear in data set.
Parameter indent ignored"""
if indent:
result = pprint.pformat(x, indent)
else:
result = repr(x).replace("'", '"')
return result
def load_json(x):
"""dumb not safe! Works for the purposes of this specific script"""
x = x.replace('\r', '')
return eval(x)
else:
dump_json = json.dumps
load_json = json.loads
DEFAULT_BLANK_MARKER = u"\uFFFD" # u'_'
data_dir = os.path.join(os.path.dirname(__file__), 'data')
class MyHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
print "302 redirecting...."
return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
def wget(url):
print 'opening %s' % url
cookieprocessor = urllib2.HTTPCookieProcessor()
opener = urllib2.build_opener(MyHTTPRedirectHandler, cookieprocessor)
urllib2.install_opener(opener)
response = urllib2.urlopen(url)
result = response.read()
response.close()
return result
def x2db():
db.commit()
db.close()
def doit():
column_name_start_row = 2 # NOTE 0 is the first line (so row number in sheet - 1)
data_start_row = 4 # NOTE 0 is the first line (so row number in sheet - 1)
dbname = ':memory:'
#dbname = os.path.join(data_dir, 'tmpdb.db')
db = sqlite3.connect(dbname)
do_drop = True
xfdata = wget('https://docs.google.com/spreadsheet/ccc?key=0Ajv9fdKngBJ_dHFvZjBzZDBjTE16T3JwNC0tRlp6Wnc&output=xls')
xf = xlrd.open_workbook(file_contents=xfdata)
xls2db.xls2db(xf, db, column_name_start_row=column_name_start_row, data_start_row=data_start_row, do_drop=do_drop)
c = db.cursor()
all_cards = []
dumb_restrict = """ where "v1.4" is not NULL and "v1.4" <> '' """
print dumb_restrict
c.execute(""" select "Text" as text, "Special" as special from "Main Deck Black" """ + dumb_restrict + 'order by text')
print c.description
for row_id, row in enumerate(c.fetchall(), 1):
draw = 0
print row_id, row
card_text = row[0]
special = row[1]
card_text = card_text.replace('______', DEFAULT_BLANK_MARKER)
if '_' in card_text:
raise NotImplementedError('found an underscore, this may not be a real problem')
pick = card_text.count(DEFAULT_BLANK_MARKER)
if pick < 1:
pick = 1
"""
if '+' in card_text:
import pdb ; pdb.set_trace()
"""
if special:
print row
if special == 'PICK 2':
pick = 2
elif special == 'DRAW 2, PICK 3':
draw = 2
pick = 3
else:
raise NotImplementedError('unrecognized special')
black_card = {"pk": row_id, "model": "cards.blackcard", "fields": {"text":card_text, "draw": draw, "watermark": "v1.4", "pick": pick}}
all_cards.append(black_card)
c.execute(""" select "Text" as text from "Main Deck White" """ + dumb_restrict + 'order by text')
print c.description
for row_id, row in enumerate(c.fetchall(), 1):
print row_id, row
card_text = row[0]
white_card = {"pk": row_id, "model": "cards.whitecard", "fields": {"text":card_text, "watermark": "v1.4"}}
all_cards.append(white_card)
db.commit()
db.close()
filename = os.path.join(DJANGO_CARDS_DATA_DIR, 'initial_data.json')
print 'writing %s' % filename
data = dump_json(all_cards, indent=4)
# now "fix" indentation to match Djago fixture formatting
new_data = []
for line in data.split('\n'):
if line.startswith(' '):
line = line[4:]
if line.endswith('}, '):
line = line[:-1]
new_data.append(line)
data = '\n'.join(new_data)
f = open(filename, 'wb')
f.write(data)
f.write('\n')
f.close()
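# Editor's sketch (never called): a quick sanity check of the fixture written by
# doit(), counting entries per model; load_json is the helper defined above.
def check_fixture(filename):
    counts = {}
    for entry in load_json(open(filename).read()):
        counts[entry['model']] = counts.get(entry['model'], 0) + 1
    return counts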
def main(argv=None):
if argv is None:
argv = sys.argv
doit()
return 0
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "19acaff8df3743da56c66e7284fc05c1",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 171,
"avg_line_length": 29.87845303867403,
"alnum_prop": 0.6022559171597633,
"repo_name": "phildini/cards-against-django",
"id": "f1cb40fe78d1afa73c33a775350a8c16dae639c9",
"size": "5508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/sheet2json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "788"
},
{
"name": "HTML",
"bytes": "15831"
},
{
"name": "JavaScript",
"bytes": "1383"
},
{
"name": "Puppet",
"bytes": "4102"
},
{
"name": "Python",
"bytes": "118269"
},
{
"name": "Ruby",
"bytes": "179"
},
{
"name": "Shell",
"bytes": "4595"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_by_billing_account_request(
billing_account_name: str, billing_role_definition_name: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingRoleDefinitions/{billingRoleDefinitionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
"billingRoleDefinitionName": _SERIALIZER.url(
"billing_role_definition_name", billing_role_definition_name, "str"
),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_by_invoice_section_request(
billing_account_name: str,
billing_profile_name: str,
invoice_section_name: str,
billing_role_definition_name: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}/billingRoleDefinitions/{billingRoleDefinitionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
"billingProfileName": _SERIALIZER.url("billing_profile_name", billing_profile_name, "str"),
"invoiceSectionName": _SERIALIZER.url("invoice_section_name", invoice_section_name, "str"),
"billingRoleDefinitionName": _SERIALIZER.url(
"billing_role_definition_name", billing_role_definition_name, "str"
),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_by_billing_profile_request(
billing_account_name: str, billing_profile_name: str, billing_role_definition_name: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/billingRoleDefinitions/{billingRoleDefinitionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
"billingProfileName": _SERIALIZER.url("billing_profile_name", billing_profile_name, "str"),
"billingRoleDefinitionName": _SERIALIZER.url(
"billing_role_definition_name", billing_role_definition_name, "str"
),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_billing_account_request(billing_account_name: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url", "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingRoleDefinitions"
) # pylint: disable=line-too-long
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_invoice_section_request(
billing_account_name: str, billing_profile_name: str, invoice_section_name: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}/billingRoleDefinitions",
) # pylint: disable=line-too-long
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
"billingProfileName": _SERIALIZER.url("billing_profile_name", billing_profile_name, "str"),
"invoiceSectionName": _SERIALIZER.url("invoice_section_name", invoice_section_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_billing_profile_request(
billing_account_name: str, billing_profile_name: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/billingRoleDefinitions",
) # pylint: disable=line-too-long
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
"billingProfileName": _SERIALIZER.url("billing_profile_name", billing_profile_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class BillingRoleDefinitionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.billing.BillingManagementClient`'s
:attr:`billing_role_definitions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get_by_billing_account(
self, billing_account_name: str, billing_role_definition_name: str, **kwargs: Any
) -> _models.BillingRoleDefinition:
"""Gets the definition for a role on a billing account. The operation is supported for billing
accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param billing_role_definition_name: The ID that uniquely identifies a role definition.
Required.
:type billing_role_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BillingRoleDefinition or the result of cls(response)
:rtype: ~azure.mgmt.billing.models.BillingRoleDefinition
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BillingRoleDefinition]
request = build_get_by_billing_account_request(
billing_account_name=billing_account_name,
billing_role_definition_name=billing_role_definition_name,
api_version=api_version,
template_url=self.get_by_billing_account.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("BillingRoleDefinition", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_billing_account.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingRoleDefinitions/{billingRoleDefinitionName}"} # type: ignore
@distributed_trace
def get_by_invoice_section(
self,
billing_account_name: str,
billing_profile_name: str,
invoice_section_name: str,
billing_role_definition_name: str,
**kwargs: Any
) -> _models.BillingRoleDefinition:
"""Gets the definition for a role on an invoice section. The operation is supported only for
billing accounts with agreement type Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param billing_profile_name: The ID that uniquely identifies a billing profile. Required.
:type billing_profile_name: str
:param invoice_section_name: The ID that uniquely identifies an invoice section. Required.
:type invoice_section_name: str
:param billing_role_definition_name: The ID that uniquely identifies a role definition.
Required.
:type billing_role_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BillingRoleDefinition or the result of cls(response)
:rtype: ~azure.mgmt.billing.models.BillingRoleDefinition
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BillingRoleDefinition]
request = build_get_by_invoice_section_request(
billing_account_name=billing_account_name,
billing_profile_name=billing_profile_name,
invoice_section_name=invoice_section_name,
billing_role_definition_name=billing_role_definition_name,
api_version=api_version,
template_url=self.get_by_invoice_section.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("BillingRoleDefinition", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_invoice_section.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}/billingRoleDefinitions/{billingRoleDefinitionName}"} # type: ignore
@distributed_trace
def get_by_billing_profile(
self, billing_account_name: str, billing_profile_name: str, billing_role_definition_name: str, **kwargs: Any
) -> _models.BillingRoleDefinition:
"""Gets the definition for a role on a billing profile. The operation is supported for billing
accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param billing_profile_name: The ID that uniquely identifies a billing profile. Required.
:type billing_profile_name: str
:param billing_role_definition_name: The ID that uniquely identifies a role definition.
Required.
:type billing_role_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BillingRoleDefinition or the result of cls(response)
:rtype: ~azure.mgmt.billing.models.BillingRoleDefinition
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BillingRoleDefinition]
request = build_get_by_billing_profile_request(
billing_account_name=billing_account_name,
billing_profile_name=billing_profile_name,
billing_role_definition_name=billing_role_definition_name,
api_version=api_version,
template_url=self.get_by_billing_profile.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("BillingRoleDefinition", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_billing_profile.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/billingRoleDefinitions/{billingRoleDefinitionName}"} # type: ignore
@distributed_trace
def list_by_billing_account(
self, billing_account_name: str, **kwargs: Any
) -> Iterable["_models.BillingRoleDefinition"]:
"""Lists the role definitions for a billing account. The operation is supported for billing
accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BillingRoleDefinition or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.billing.models.BillingRoleDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BillingRoleDefinitionListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_billing_account_request(
billing_account_name=billing_account_name,
api_version=api_version,
template_url=self.list_by_billing_account.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("BillingRoleDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_billing_account.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingRoleDefinitions"} # type: ignore
@distributed_trace
def list_by_invoice_section(
self, billing_account_name: str, billing_profile_name: str, invoice_section_name: str, **kwargs: Any
) -> Iterable["_models.BillingRoleDefinition"]:
"""Lists the role definitions for an invoice section. The operation is supported for billing
accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param billing_profile_name: The ID that uniquely identifies a billing profile. Required.
:type billing_profile_name: str
:param invoice_section_name: The ID that uniquely identifies an invoice section. Required.
:type invoice_section_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BillingRoleDefinition or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.billing.models.BillingRoleDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BillingRoleDefinitionListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_invoice_section_request(
billing_account_name=billing_account_name,
billing_profile_name=billing_profile_name,
invoice_section_name=invoice_section_name,
api_version=api_version,
template_url=self.list_by_invoice_section.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("BillingRoleDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_invoice_section.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}/billingRoleDefinitions"} # type: ignore
@distributed_trace
def list_by_billing_profile(
self, billing_account_name: str, billing_profile_name: str, **kwargs: Any
) -> Iterable["_models.BillingRoleDefinition"]:
"""Lists the role definitions for a billing profile. The operation is supported for billing
accounts with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param billing_profile_name: The ID that uniquely identifies a billing profile. Required.
:type billing_profile_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BillingRoleDefinition or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.billing.models.BillingRoleDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BillingRoleDefinitionListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_billing_profile_request(
billing_account_name=billing_account_name,
billing_profile_name=billing_profile_name,
api_version=api_version,
template_url=self.list_by_billing_profile.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("BillingRoleDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_billing_profile.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/billingRoleDefinitions"} # type: ignore
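# --- Hedged usage sketch (editorial addition, not generated SDK code) ---
# How these operations might be driven from a configured client; the account,
# profile, and role-definition names below are placeholders, and `client` is
# assumed to be an authenticated azure.mgmt.billing.BillingManagementClient.
#
#     role_defs = client.billing_role_definitions
#     for definition in role_defs.list_by_billing_account("<account-name>"):
#         print(definition.name)
#     one = role_defs.get_by_billing_profile(
#         "<account-name>", "<profile-name>", "<role-definition-id>")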
|
{
"content_hash": "c27944c359cb53c2cb923c884dc7a850",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 255,
"avg_line_length": 46.5414201183432,
"alnum_prop": 0.6579683427626979,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a12bedcc72c961bc1a8b78713111e6cfc2ff2c07",
"size": "31962",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/billing/azure-mgmt-billing/azure/mgmt/billing/operations/_billing_role_definitions_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from M2Crypto import SMIME, X509
# Instantiate an SMIME object.
s = SMIME.SMIME()
# Load the signer's cert.
x509 = X509.load_cert('mycert.pem')
sk = X509.X509_Stack()
sk.push(x509)
s.set_x509_stack(sk)
# Load the signer's CA cert. In this case, because the signer's
# cert is self-signed, it is the signer's cert itself.
st = X509.X509_Store()
st.load_info('mycert.pem')
s.set_x509_store(st)
# Load the data, verify it.
p7, data = SMIME.smime_load_pkcs7('smime_signed.txt')
v = s.verify(p7, data)
print(v)
print(data)
print(data.read())
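# Hedged companion sketch: the signing side that would have produced
# smime_signed.txt, following M2Crypto's documented sign flow and assuming
# mykey.pem/mycert.pem exist.
#
#     from M2Crypto import BIO, SMIME
#     signer = SMIME.SMIME()
#     signer.load_key('mykey.pem', 'mycert.pem')
#     p7 = signer.sign(BIO.MemoryBuffer('a message'))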
|
{
"content_hash": "560734ccaba988ae19b186a4ed332901",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 63,
"avg_line_length": 23.347826086956523,
"alnum_prop": 0.7094972067039106,
"repo_name": "joschi/jersey-smime",
"id": "f550d8dbdc024bdeac2d515ae87378b8b3f3161a",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/resources/verify_signed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "169338"
},
{
"name": "Python",
"bytes": "4847"
}
],
"symlink_target": ""
}
|
import re
class Key(object):
def __init__(self, key="", start="", end=""):
self.key = key
self.start = start
self.end = end
def is_match(self, line="", key=""):
val = []
match = re.search(key, line)
if match:
val = match.groups()
m = True
else:
m = False
return m, val
def get_keys(self):
return self.key
def get_patterns(self):
temp = []
for i in self.key:
temp.append(i[0])
return temp
def get_key_mode(self, key):
return key[1]
def get_start_key(self):
return self.start
def get_end_key(self):
return self.end
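# Hedged usage sketch: the (pattern, mode) tuple below is an assumption about
# the shape of the key list this class expects; only is_match/get_patterns
# are exercised.
if __name__ == '__main__':
    k = Key(key=[(r"hostname (\S+)", "single")], start=r"^config", end=r"^end")
    matched, groups = k.is_match(line="hostname switch-01",
                                 key=k.get_patterns()[0])
    print(matched, groups)  # True ('switch-01',)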
|
{
"content_hash": "7e6a22329a5a0ead4d1f5454e8a2598f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 49,
"avg_line_length": 18.025,
"alnum_prop": 0.47711511789181693,
"repo_name": "fmichalo/n9k-programmability",
"id": "1d38ed7e8993f2afd8f1044494cbc56288adf125",
"size": "783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python nxapi scripts/cisco/key.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93181"
},
{
"name": "HTML",
"bytes": "31326"
},
{
"name": "JavaScript",
"bytes": "26618"
},
{
"name": "Python",
"bytes": "469144"
}
],
"symlink_target": ""
}
|
from axon.test import *
test_all()
|
{
"content_hash": "32b89092c0c0d401e863eddfde5c4eeb",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 23,
"avg_line_length": 9.25,
"alnum_prop": 0.6756756756756757,
"repo_name": "intellimath/pyaxon",
"id": "c96c5f25179f5e1e8bb91eabacb4cf6e17d03c30",
"size": "1213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "2731"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "284693"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.conf import settings
from .. import fields
from .models import *
from datetime import date
incoming_markdown = "**bold**, *italic*"
gen_html = "<p><strong>bold</strong>, <em>italic</em></p>"
class AutoMarkdownTests(TestCase):
def setUp(self):
self.m = TestAutoDescriptionModel()
self.m.text = incoming_markdown
self.m.save()
def tearDown(self):
self.m.delete()
def test_nonpop_markdown(self):
self.assertEquals(self.m.nonpop, "")
def test_auto_markdown(self):
self.assertEquals(self.m.html, gen_html)
def test_only_create_markdown(self):
self.m.text = ""
self.m.save()
self.assertEquals(self.m.html, "")
self.assertEquals(self.m.nonupdate, gen_html)
class AutoSlugTests(TestCase):
def setUp(self):
self.m1 = TestAutoSlugModel(name = "Some String")
self.m1.save()
self.m2 = TestAutoSlugModel(name = "Some String")
self.m2.save()
super(AutoSlugTests, self).setUp()
def tearDown(self):
self.m1.delete()
self.m2.delete()
def test_nongen_slug(self):
m = TestAutoSlugModel(name = "Some String")
m.slug = "a-slug"
m.uniq = "a-slug"
m.save()
self.assertEquals(m.name, "Some String")
self.assertEquals(m.slug, "a-slug")
self.assertEquals(m.uniq, "a-slug")
def test_nonpop_slug(self):
self.assertEquals(self.m1.nonpop, "")
def test_nonuniq_slug(self):
self.assertEquals(self.m1.slug, "some-string")
self.assertEquals(self.m2.slug, "some-string")
def test_uniq_slug(self):
self.assertEquals(self.m1.uniq, "some-string")
self.assertEquals(self.m2.uniq, "some-string-1")
class AutoSlugFieldUniqueTests(TestCase):
def setUp(self):
self.m1 = TestFieldUniqueSlugModel()
self.m1.name = "Jon Raphaelson"
self.m1.date = date(2009, 8, 1)
self.m1.save()
self.m2 = TestFieldUniqueSlugModel()
self.m2.name = "Jon Raphaelson"
self.m2.date = date(2009, 8, 2)
self.m2.save()
self.m3 = TestFieldUniqueSlugModel()
self.m3.name = "Jon Raphaelson"
self.m3.date = date(2009, 8, 2)
self.m3.save()
def test_unique(self):
self.assertEquals(self.m1.slug, "jon-raphaelson")
self.assertEquals(self.m1.uniq, "jon-raphaelson")
self.assertEquals(self.m2.slug, "jon-raphaelson-1")
self.assertEquals(self.m2.uniq, "jon-raphaelson")
self.assertEquals(self.m3.slug, "jon-raphaelson-2")
self.assertEquals(self.m3.uniq, "jon-raphaelson-1")
class SlugFieldFormatTests(TestCase):
def test(self):
settings.AUTOSLUG_FORMAT = "%s.%s"
m1 = TestFieldUniqueSlugModel()
m1.name = "Jon Raphaelson"
m1.date = date(2009, 8, 1)
m1.save()
m2 = TestFieldUniqueSlugModel()
m2.name = "Jon Raphaelson"
m2.date = date(2009, 8, 1)
m2.save()
self.assertEquals(m2.slug, "jon-raphaelson.1")
class SerializedDataTests(TestCase):
def setUp(self):
self.list = TestSerializedDataModel()
self.list.data = [1,2,3,4,5,6,7,8,9]
self.list.save()
self.tuples = TestSerializedDataModel()
self.tuples.data = (1,2,3)
self.tuples.save()
self.null = TestSerializedDataModel()
self.null.data = None
self.null.save()
self.default = TestSerializedDataModel()
self.default.save()
def test_serialized(self):
l = TestSerializedDataModel.objects.get(pk = 1)
self.assertEquals(type(l.data), type([1]))
self.assertEquals(l.data, [1,2,3,4,5,6,7,8,9])
t = TestSerializedDataModel.objects.get(pk = 2)
self.assertEquals(type(t.data), type((1,)))
self.assertEquals(t.data, (1,2,3))
def test_null(self):
n = TestSerializedDataModel.objects.get(pk = 3)
self.assertEquals(n.data, None)
def test_default(self):
d = TestSerializedDataModel.objects.get(pk = 4)
self.assertEquals(d.data, None)
|
{
"content_hash": "e2b4e55d9a8b1f85fff3118fa45e7c98",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 59,
"avg_line_length": 30.566176470588236,
"alnum_prop": 0.612942025499158,
"repo_name": "lygaret/django-autofields",
"id": "d55dda708b14770945e0676691a296378b570ba7",
"size": "4157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autofields/tests/autofield.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12100"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
import sys
import types
from datetime import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'releases']
# 'releases' (changelog) settings
releases_issue_uri = "https://github.com/fabric/fabric/issues/%s"
releases_release_uri = "https://github.com/fabric/fabric/tree/%s"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fabric'
year = datetime.now().year
copyright = u'%d, Christian Vest Hansen and Jeffrey E. Forcier' % year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Add this checkout's local Fabric module to sys.path. Allows use of
# fabric.version in here, and ensures that the autodoc stuff also works.
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), '..')))
from fabric.version import get_version
# Get version info
#
# Branch-only name
version = get_version('branch')
# The full human readable version, including alpha/beta/rc tags.
release = get_version('normal')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
html_style = 'rtd.css'
html_context = {}
from fabric.api import local, hide, settings
with settings(hide('everything'), warn_only=True):
get_tags = 'git tag | sort -r | egrep "(1\.[^0]+)\.."'
tag_result = local(get_tags, True)
if tag_result.succeeded:
html_context['fabric_tags'] = tag_result.split()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fabricdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Fabric.tex', u'Fabric Documentation',
u'Jeff Forcier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Restore decorated functions so that autodoc inspects the right arguments
def unwrap_decorated_functions():
from fabric import operations, context_managers
for module in [context_managers, operations]:
        for name, obj in list(vars(module).items()):
if (
# Only function objects - just in case some real object showed
# up that had .undecorated
isinstance(obj, types.FunctionType)
# Has our .undecorated 'cache' of the real object
and hasattr(obj, 'undecorated')
):
setattr(module, name, obj.undecorated)
unwrap_decorated_functions()
|
{
"content_hash": "5d14e3209fcd14f4243f9edfa0ea53e8",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 80,
"avg_line_length": 33.44,
"alnum_prop": 0.7026847421584264,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "cae374741d5c5a875c06a832781e03298fa914e0",
"size": "7941",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Dataset/python/conf (12).py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
}
|
import binary_heap as bh
class priorityQueue(object):
def __init__(self):
self.bin_heap = bh.binHeap()
    def insert(self, input_value, order):
        # Note: 'order' is currently unused; priority comes from the value's
        # own ordering inside the binary heap.
        self.bin_heap.push(input_value)
def pop(self):
input_value = self.bin_heap.pop()
return input_value
    def peek(self):
        # Return the smallest value without removing it; binHeap only exposes
        # push/pop, so pop the value and immediately push it back.
        input_value = self.bin_heap.pop()
        self.bin_heap.push(input_value)
        return input_value
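# Hedged usage sketch, assuming binary_heap.binHeap is a min-heap exposing
# push()/pop():
#
#     pq = priorityQueue()
#     pq.insert(5, order=0)
#     pq.insert(2, order=0)
#     print(pq.peek())  # 2, and the value stays in the queue
#     print(pq.pop())   # 2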
|
{
"content_hash": "a31eb001e5e3442246a281131b3a9865",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 41,
"avg_line_length": 21.055555555555557,
"alnum_prop": 0.604221635883905,
"repo_name": "jacquestardie/data-structures",
"id": "6a43fae7722f1a85e9b30b3b487583cada93b1d4",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "priority_queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23297"
}
],
"symlink_target": ""
}
|
"""
Provides interfaces to various commands provided by Camino-Trackvis
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import os
from ...utils.filemanip import split_filename
from ..base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File
class Camino2TrackvisInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='-i %s', mandatory=True, position=1,
desc='The input .Bfloat (camino) file.')
out_file = File(argstr='-o %s', genfile=True, position=2,
desc='The filename to which to write the .trk (trackvis) file.')
min_length = traits.Float(argstr='-l %d', position=3,
units='mm', desc='The minimum length of tracts to output')
data_dims = traits.List(traits.Int, argstr='-d %s', sep=',',
mandatory=True, position=4, minlen=3, maxlen=3,
desc='Three comma-separated integers giving the number of voxels along each dimension of the source scans.')
voxel_dims = traits.List(traits.Float, argstr='-x %s', sep=',',
mandatory=True, position=5, minlen=3, maxlen=3,
desc='Three comma-separated numbers giving the size of each voxel in mm.')
# Change to enum with all combinations? i.e. LAS, LPI, RAS, etc..
voxel_order = File(argstr='--voxel-order %s', mandatory=True, position=6,
desc='Set the order in which various directions were stored.\
Specify with three letters consisting of one each \
from the pairs LR, AP, and SI. These stand for Left-Right, \
Anterior-Posterior, and Superior-Inferior. \
Whichever is specified in each position will \
be the direction of increasing order. \
Read coordinate system from a NIfTI file.')
nifti_file = File(argstr='--nifti %s', exists=True,
position=7, desc='Read coordinate system from a NIfTI file.')
class Camino2TrackvisOutputSpec(TraitedSpec):
trackvis = File(exists=True, desc='The filename to which to write the .trk (trackvis) file.')
class Camino2Trackvis(CommandLine):
""" Wraps camino_to_trackvis from Camino-Trackvis
Convert files from camino .Bfloat format to trackvis .trk format.
Example
-------
>>> import nipype.interfaces.camino2trackvis as cam2trk
>>> c2t = cam2trk.Camino2Trackvis()
>>> c2t.inputs.in_file = 'data.Bfloat'
>>> c2t.inputs.out_file = 'streamlines.trk'
>>> c2t.inputs.min_length = 30
>>> c2t.inputs.data_dims = [128, 104, 64]
>>> c2t.inputs.voxel_dims = [2.0, 2.0, 2.0]
>>> c2t.inputs.voxel_order = 'LAS'
>>> c2t.run() # doctest: +SKIP
"""
_cmd = 'camino_to_trackvis'
input_spec = Camino2TrackvisInputSpec
output_spec = Camino2TrackvisOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['trackvis'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '.trk'
class Trackvis2CaminoInputSpec(CommandLineInputSpec):
    in_file = File(exists=True, argstr='-i %s',
                   mandatory=True, position=1,
                   desc='The input .trk (trackvis) file.')
    out_file = File(argstr='-o %s', genfile=True,
                    position=2, desc='The filename to which to write the .Bfloat (camino).')
    append_file = File(exists=True, argstr='-a %s',
                       position=2, desc='A file to which to append the .Bfloat data.')
class Trackvis2CaminoOutputSpec(TraitedSpec):
    camino = File(exists=True, desc='The filename to which to write the .Bfloat (camino).')
class Trackvis2Camino(CommandLine):
    """ Wraps trackvis_to_camino from Camino-Trackvis
    Convert files from trackvis .trk format to camino .Bfloat format.
    Example
    -------
    >>> import nipype.interfaces.camino2trackvis as cam2trk
    >>> t2c = cam2trk.Trackvis2Camino()
    >>> t2c.inputs.in_file = 'streamlines.trk'
    >>> t2c.inputs.out_file = 'streamlines.Bfloat'
    >>> t2c.run() # doctest: +SKIP
    """
    _cmd = 'trackvis_to_camino'
    input_spec = Trackvis2CaminoInputSpec
    output_spec = Trackvis2CaminoOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['camino'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '.Bfloat'
|
{
"content_hash": "4ba7cbd2f49514d8eadebac3e7753e80",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 136,
"avg_line_length": 36.816901408450704,
"alnum_prop": 0.6201224177505739,
"repo_name": "mick-d/nipype",
"id": "b9032ba1cdd4a8d518290298007eb555f5be4361",
"size": "5252",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "nipype/interfaces/camino2trackvis/convert.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
import mock
import datetime
from cloudferry.lib.utils import filters
from tests import test
class BaseCFFiltersTestCase(test.TestCase):
def test_base_class_has_get_filters_method(self):
self.assertTrue(callable(filters.CFFilters.get_filters))
def test_cannot_create_object_of_filters(self):
self.assertRaises(TypeError, filters.CFFilters)
class FilterYamlTestCase(test.TestCase):
@mock.patch("cloudferry.lib.utils.filters.yaml.load")
def test_reads_config_on_first_use(self, yaml_load):
fy_stream = mock.Mock()
fy = filters.FilterYaml(fy_stream)
self.assertFalse(yaml_load.called)
fy.get_filter_yaml()
self.assertTrue(yaml_load.called)
def test_returns_empty_dict_if_filter_conf_is_empty(self):
filters_file = u""
fy = filters.FilterYaml(filters_file)
self.assertEqual(dict(), fy.get_filter_yaml())
def test_returns_none_if_no_tenant_provided(self):
filters_file = u""
fy = filters.FilterYaml(filters_file)
self.assertIsNone(fy.get_tenant())
def test_returns_empty_list_if_nothing_in_image_ids(self):
filters_file = u""
fy = filters.FilterYaml(filters_file)
self.assertEqual(list(), fy.get_image_ids())
def test_returns_empty_list_for_get_excluded_image_ids(self):
filters_file = u""
fy = filters.FilterYaml(filters_file)
self.assertEqual(list(), fy.get_excluded_image_ids())
def test_returns_empty_list_if_nothing_in_instance_ids(self):
filters_file = u""
fy = filters.FilterYaml(filters_file)
self.assertEqual(list(), fy.get_instance_ids())
def test_returns_tenant_id_if_provided(self):
tenant_id = 'some-tenant'
filters_file = u"""
tenants:
tenant_id:
- {tenant_id}
""".format(tenant_id=tenant_id)
fy = filters.FilterYaml(filters_file)
self.assertEqual(tenant_id, fy.get_tenant())
def test_returns_instances_from_instance_ids(self):
instance1 = 'inst1'
instance2 = 'inst2'
filters_file = u"""
instances:
id:
- {instance1}
- {instance2}
""".format(instance1=instance1, instance2=instance2)
fy = filters.FilterYaml(filters_file)
filtered_instances = fy.get_instance_ids()
self.assertTrue(isinstance(filtered_instances, list))
self.assertIn(instance1, filtered_instances)
self.assertIn(instance2, filtered_instances)
def test_returns_images_from_excluded_image_ids(self):
image1 = 'image1'
image2 = 'image2'
filters_file = u"""
images:
exclude_images_list:
- {image1}
- {image2}
""".format(image1=image1, image2=image2)
fy = filters.FilterYaml(filters_file)
filtered_images = fy.get_excluded_image_ids()
self.assertTrue(isinstance(filtered_images, list))
self.assertIn(image1, filtered_images)
self.assertIn(image2, filtered_images)
def test_returns_images_from_image_ids(self):
image1 = 'image1'
image2 = 'image2'
filters_file = u"""
images:
images_list:
- {image1}
- {image2}
""".format(image1=image1, image2=image2)
fy = filters.FilterYaml(filters_file)
filtered_images = fy.get_image_ids()
self.assertTrue(isinstance(filtered_images, list))
self.assertIn(image1, filtered_images)
self.assertIn(image2, filtered_images)
def test_date_returns_none_if_not_specified(self):
filters_file = u""
fy = filters.FilterYaml(filters_file)
self.assertIsNone(fy.get_image_date())
def test_date_filter_returns_datetime_object(self):
filters_file = u"""
images:
date: 2000-01-01
"""
fy = filters.FilterYaml(filters_file)
filtered_date = fy.get_image_date()
self.assertTrue(isinstance(filtered_date, datetime.date))
|
{
"content_hash": "796912467b0a1c488d8438331180c274",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 65,
"avg_line_length": 32.51587301587302,
"alnum_prop": 0.6177690993409812,
"repo_name": "SVilgelm/CloudFerry",
"id": "1f31b3e5a7544691888361a6f6c6deb7f73a8e36",
"size": "4674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lib/utils/test_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "1718937"
},
{
"name": "Ruby",
"bytes": "2507"
},
{
"name": "Shell",
"bytes": "11910"
}
],
"symlink_target": ""
}
|
"""Entry point of Python Exonum test library"""
from suite.exonum import ExonumNetwork
from suite.process_manager import ProcessOutput, ProcessExitResult
from suite.common import *
from suite.crypto_advanced_client import ExonumCryptoAdvancedClient
|
{
"content_hash": "3c96763ebe00209dc763e78282bcb349",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 67,
"avg_line_length": 49.8,
"alnum_prop": 0.8433734939759037,
"repo_name": "alekseysidorov/exonum",
"id": "4581a2854dcae93887ad673ccaa11f1ffac50013",
"size": "249",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test-suite/exonum-py-tests/suite/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7043"
},
{
"name": "Python",
"bytes": "68358"
},
{
"name": "Rust",
"bytes": "3528710"
},
{
"name": "Shell",
"bytes": "820"
}
],
"symlink_target": ""
}
|
"""
Created on 4 Sep 2020
Updated 23 Mar 2021
@author: Jade Page (jade.page@southcoastscience.com)
https://packaging.python.org/tutorials/packaging-projects/
https://packaging.python.org/guides/single-sourcing-package-version/
"""
import codecs
import os
import setuptools
# --------------------------------------------------------------------------------------------------------------------
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
return line.split("'")[1]
else:
raise RuntimeError("Unable to find version string.")
# --------------------------------------------------------------------------------------------------------------------
with open("README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as req_txt:
required = [line for line in req_txt.read().splitlines() if line]
setuptools.setup(
name="scs-core",
version=get_version("src/scs_core/__init__.py"),
author="South Coast Science",
author_email="contact@southcoastscience.com",
description="The root of all South Coast Science environmental monitoring applications.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/south-coast-science/scs_core",
package_dir={'': 'src'},
packages=setuptools.find_packages('src'),
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
],
python_requires='>3.6',
install_requires=required,
platforms=['any'],
)
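# Hedged usage note: built and installed in the usual setuptools way, e.g.
#     python setup.py sdist
#     pip install .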
|
{
"content_hash": "f3c8a88065ba4ec5f1af763128367b7e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 118,
"avg_line_length": 31.3015873015873,
"alnum_prop": 0.5786004056795132,
"repo_name": "south-coast-science/scs_core",
"id": "802e4cf1b7f847e1d3a390083ee5b2f966d53624",
"size": "1996",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1461551"
}
],
"symlink_target": ""
}
|
"""Add status to leases
Revision ID: 23d6240b51b2
Revises: 2bcfe76b0474
Create Date: 2014-04-25 10:41:09.183430
"""
# revision identifiers, used by Alembic.
revision = '23d6240b51b2'
down_revision = '2bcfe76b0474'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('leases', sa.Column(
'action', sa.String(length=255), nullable=True))
op.add_column('leases', sa.Column(
'status', sa.String(length=255), nullable=True))
op.add_column('leases', sa.Column(
'status_reason', sa.String(length=255), nullable=True))
def downgrade():
engine = op.get_bind().engine
if engine.name == 'sqlite':
# Only for testing purposes with sqlite
op.execute('CREATE TABLE tmp_leases as SELECT created_at, updated_at, '
'id, name, user_id, project_id, start_date, '
'end_date, trust_id FROM leases')
op.execute('DROP TABLE leases')
op.execute('ALTER TABLE tmp_leases RENAME TO leases')
return
op.drop_column('leases', 'action')
op.drop_column('leases', 'status')
op.drop_column('leases', 'status_reason')
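# Hedged usage note: applied through the standard Alembic CLI, e.g.
#     alembic upgrade 23d6240b51b2
#     alembic downgrade 2bcfe76b0474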
|
{
"content_hash": "53dc8cf74a4c172b5fa4269077c92604",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 29.487179487179485,
"alnum_prop": 0.6443478260869565,
"repo_name": "frossigneux/blazar",
"id": "d4300ab8efb12bce0f3f133a9a7b77f2db416a3d",
"size": "1737",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "climate/db/migration/alembic_migrations/versions/23d6240b51b2_add_status_to_leases.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15839"
},
{
"name": "JavaScript",
"bytes": "190"
},
{
"name": "Python",
"bytes": "926277"
},
{
"name": "Shell",
"bytes": "786"
}
],
"symlink_target": ""
}
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer, BigInteger, Numeric, DateTime, ForeignKey
from sqlalchemy.orm import backref, relationship
from geoalchemy2 import Geometry
"""
Object-relational model of floats database.
See etl.py for extract/transform/load procedure
"""
Base = declarative_base()
class Float(Base):
"""
Represents a single NOAA float from the database.
"""
__tablename__ = 'floats'
id = Column(BigInteger, primary_key=True) # ID
pi = Column(String) # PRINCIPAL_INVESTIGATOR
organization = Column(String) # ORGANIZATION
experiment = Column(String) # EXPERIMENT
start_date = Column(DateTime) # 1st_DATE
start_lat = Column(Numeric) # 1st_LAT
start_lon = Column(Numeric) # 1st_LON
end_date = Column(DateTime) # END_DATE
end_lat = Column(Numeric) # END_LAT
end_lon = Column(Numeric) # END_LON
type = Column(String) # TYPE
filename = Column(String) # FILENAME
track = Column(Geometry('MULTILINESTRING')) # track geometry
def __repr__(self):
return '<Float #%d>' % (self.id)
def get_metadata(self):
"""
Return float metadata as dict
"""
return {
'ID': self.id,
'PRINCIPAL_INVESTIGATOR': self.pi,
'ORGANIZATION': self.organization,
'EXPERIMENT': self.experiment,
'1st_DATE': self.start_date,
'1st_LAT': self.start_lat,
'1st_LON': self.start_lon,
'END_DATE': self.end_date,
'END_LAT': self.end_lat,
'END_LON': self.end_lon,
'TYPE': self.type,
'FILENAME': self.filename
}
class Point(Base):
"""
Represents a single point along a float track
"""
__tablename__ = 'points'
id = Column(BigInteger, primary_key=True)
float_id = Column(BigInteger, ForeignKey('floats.id'))
date = Column(DateTime) # DATE, TIME
lat = Column(Numeric) # latitude
lon = Column(Numeric) # longitude
pressure = Column(Numeric) # pressure
u = Column(Numeric) # velocity u component
v = Column(Numeric) # velocity v component
temperature = Column(Numeric) # temperature
q_time = Column(Integer) # quality annotation on date/time
q_pos = Column(Integer) # quality annotation on lat/lon
q_press = Column(Integer) # quality annotation on pressure
q_vel = Column(Integer) # quality annotation on u/v
q_temp = Column(Integer) # quality annotation on temperature
# establish Float.points relationship
float = relationship('Float',
backref=backref('points', cascade='all, delete-orphan'))
def __repr__(self):
return '<Point %.4f %.4f %.2f>' % (self.lat, self.lon, self.pressure)
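# Hedged usage sketch (editorial addition): querying the model, assuming a
# PostGIS-enabled database reachable at the placeholder URL below.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#     engine = create_engine('postgresql://localhost/floats')
#     session = sessionmaker(bind=engine)()
#     f = session.query(Float).first()
#     print(f.get_metadata())
#     print(len(f.points))  # points via the Float.points backref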
|
{
"content_hash": "8303353557b8e0f9aff7eca059d8bb71",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 89,
"avg_line_length": 34.15853658536585,
"alnum_prop": 0.6315601570867547,
"repo_name": "joefutrelle/noaa_floats",
"id": "85b1ca148713c5125410cc78a99c89127b4be2dd",
"size": "2801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5954"
},
{
"name": "HTML",
"bytes": "4612"
},
{
"name": "JavaScript",
"bytes": "6272"
},
{
"name": "Python",
"bytes": "19300"
},
{
"name": "Shell",
"bytes": "139"
}
],
"symlink_target": ""
}
|
dbfilename = '../db/people_file'
ENDDB = 'enddb'
ENDREC = 'endrec'
RECSEP = '=>'
def storeDbase(db, dbfilename=dbfilename):
dbfile = open(dbfilename, 'w')
for key in db:
print(key, file=dbfile)
for (name,value) in db[key].items():
print(name + RECSEP + repr(value), file=dbfile)
print(ENDREC, file=dbfile)
print(ENDDB, file=dbfile)
dbfile.close()
def loadDbase(dbfilename=dbfilename):
dbfile = open(dbfilename, 'r')
import sys
    sys.stdin = dbfile  # redirect stdin so the input() calls below read from the file
db = {}
key = input()
while key != ENDDB:
rec = {}
field = input()
while field != ENDREC:
name, value = field.split(RECSEP)
rec[name] = eval(value)
field = input()
db[key] = rec
key = input()
return db
if __name__ == '__main__':
from initdata import db
storeDbase(db)
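    # Hedged round-trip check: reading the flat-file database back after the
    # store above; assumes ../db/people_file is writable and initdata exists.
    #     db2 = loadDbase()
    #     assert db2 == db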
|
{
"content_hash": "7f54465d6d8f2d91323002bdc74632a3",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 59,
"avg_line_length": 22.71794871794872,
"alnum_prop": 0.5519187358916479,
"repo_name": "lichengshuang/python",
"id": "932d5c922122ef6b524de42797ee354bf959f586",
"size": "910",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "others/study/Preview/make_db_file.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "321"
},
{
"name": "HTML",
"bytes": "68150"
},
{
"name": "Python",
"bytes": "420936"
},
{
"name": "Shell",
"bytes": "76121"
},
{
"name": "Vim script",
"bytes": "27690"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals, print_function
from applications.weixin.weixin.conf import get_conf
from applications.weixin.weixin.utils import get_class_by_path
class CacheMixin(object):
def gen_cache_key(self, from_user_name, **kwargs):
key = "USER:STATE:%s" % from_user_name
return key
def get_from_cache(self, from_user_name, **kwargs):
from django.core.cache import cache
key = self.gen_cache_key(from_user_name)
return cache.get(key)
def set_cache(self, from_user_name, **kwargs):
from django.core.cache import cache
key = self.gen_cache_key(from_user_name)
cache.set(key, kwargs)
class StateManager(CacheMixin, object):
"""
只负责状态切换的逻辑,Cache封装在这一层。
"""
states = dict(map(lambda item: (item[0], get_class_by_path(item[1])),
get_conf("WX_MANGER_STATES", {}).items()))
def __init__(self, origin, state_name="ECHO", no_cache=False, **kwargs):
# 如果no_cache, 默认不使用cache
self.origin = origin
self.use_cache = get_conf("WX_USE_CACHE") and no_cache
state = self.initial_state(self.origin, state_name, **kwargs)
if self.use_cache:
# 这里从cache中获取用户当前状态
self.now_state = None
else:
# 否则根据分发来的状态制定初始状态
self.now_state = state
@classmethod
def initial(cls, origin, state_name, **kwargs):
state = cls.initial_state(origin, state_name, **kwargs)
return cls(state=state, no_cache=True, **kwargs)
@classmethod
def initial_state(cls, origin, state_name, **kwargs):
        if state_name not in cls.states:
return ""
state_cls = cls.states[state_name]
state = state_cls(origin=origin, **kwargs)
return state
def get_state(self, input):
next_state, kwargs = self.now_state.next(input)
return self.states[next_state](origin=self.origin, **kwargs)
def set_next(self, input):
self.now_state = self.get_state(input)
def handler(self, input):
response = self.now_state.to_xml(input)
self.set_next(input)
return response
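# Hedged usage sketch: driving the state machine for one incoming message,
# assuming WX_MANGER_STATES maps "ECHO" to a state class exposing
# next()/to_xml(), and `wx_message` is the parsed incoming request.
#
#     manager = StateManager(origin=wx_message, state_name="ECHO")
#     xml_reply = manager.handler(user_input)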
|
{
"content_hash": "087ee8d8eec52ef1f612b849213b1593",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 76,
"avg_line_length": 33.71875,
"alnum_prop": 0.6232622798887859,
"repo_name": "chenchiyuan/django-project-template",
"id": "d67548dc521ba6d00f9d5d642fdbb0b56768aa3a",
"size": "2316",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "applications/weixin/weixin/manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "95453"
},
{
"name": "JavaScript",
"bytes": "2550308"
},
{
"name": "Python",
"bytes": "69953"
},
{
"name": "Shell",
"bytes": "844"
}
],
"symlink_target": ""
}
|
"""bool_q dataset."""
from tensorflow_datasets import testing
from tensorflow_datasets.datasets.bool_q import bool_q_dataset_builder
class BoolQTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = bool_q_dataset_builder.Builder
SPLITS = {
"train": 2, # Number of fake train examples
"validation": 2, # Number of fake validation examples
}
DL_EXTRACT_RESULT = {
"train": "train.jsonl",
"validation": "dev.jsonl",
}
if __name__ == "__main__":
testing.test_main()
|
{
"content_hash": "d0b687fe9b8f4db6067c37805ef75e83",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 25.45,
"alnum_prop": 0.6738703339882122,
"repo_name": "tensorflow/datasets",
"id": "3b5675d5f98095d2500373c2703d74ed4c258893",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_datasets/datasets/bool_q/bool_q_dataset_builder_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "728"
},
{
"name": "JavaScript",
"bytes": "13369"
},
{
"name": "NewLisp",
"bytes": "13940"
},
{
"name": "Perl",
"bytes": "520"
},
{
"name": "Python",
"bytes": "5398856"
},
{
"name": "Roff",
"bytes": "22095"
},
{
"name": "Ruby",
"bytes": "25669"
},
{
"name": "Shell",
"bytes": "3895"
},
{
"name": "Smalltalk",
"bytes": "20604"
},
{
"name": "TeX",
"bytes": "759"
}
],
"symlink_target": ""
}
|
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 2432033
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Mincoin:0\.14\.3/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
        except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple mincoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
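# Hedged usage note: this filter reads a seeder text dump on stdin, e.g.
#     python makeseeds.py < seeds_main.txt > nodes_main.txt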
|
{
"content_hash": "12397434683919627f4d8c3d25a46c94",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 186,
"avg_line_length": 32.48101265822785,
"alnum_prop": 0.5518316445830086,
"repo_name": "mincoin-project/mincoin",
"id": "3fe6583414a22addab68918f45ded6becc2e9788",
"size": "5393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/seeds/makeseeds.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "650854"
},
{
"name": "C++",
"bytes": "4577982"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "174531"
},
{
"name": "Makefile",
"bytes": "102845"
},
{
"name": "Objective-C",
"bytes": "6702"
},
{
"name": "Objective-C++",
"bytes": "7229"
},
{
"name": "Python",
"bytes": "882269"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "3788"
},
{
"name": "Shell",
"bytes": "34265"
}
],
"symlink_target": ""
}
|
from handlers.index import login
from handlers.index import logout
from handlers.index import home
urls = [
(r'/login', login.LoginHandler),
(r'/login/search', login.UserSearch),
(r'/logout', logout.LogoutHandler)
]
urls += [
(r'/', home.LogoutHandler)
]
|
{
"content_hash": "f67b857c5f21604bf246e64389a62d70",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 41,
"avg_line_length": 19.571428571428573,
"alnum_prop": 0.6788321167883211,
"repo_name": "VMatrixTeam/open-matrix",
"id": "d534301fae8ae856a47656f1a209c646a21aae87",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/webservice/router/index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "91"
},
{
"name": "CSS",
"bytes": "224841"
},
{
"name": "HTML",
"bytes": "68428"
},
{
"name": "JavaScript",
"bytes": "733814"
},
{
"name": "Python",
"bytes": "106477"
},
{
"name": "Shell",
"bytes": "48"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
__author__ = 'szpytfire'
setup(
name = 'django_api_tools',
packages = ['django_api_tools'],
version = '0.1.1',
description = 'Django API add-on is a mini-framework which allows developers to run RESTful APIs alongside websites using Forms/Templates.',
author = 'Tom Szpytman',
author_email = 'mail@tomszpytman.com',
url = 'https://github.com/szpytfire/django-api-tools',
download_url = 'https://github.com/szpytfire/django-api-tools/tarball/0.1.1',
keywords = ['django', 'api', 'rest'],
classifiers = [],
)
|
{
"content_hash": "b9fcf0b4c7fa256621491e1485a04cb1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 142,
"avg_line_length": 35.125,
"alnum_prop": 0.6903914590747331,
"repo_name": "szpytfire/django-api-tools",
"id": "56d0f2faec91e79aca0cb2c9d1b160a45e5cf440",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59035"
}
],
"symlink_target": ""
}
|
import veritable
import veritable.utils
import json
import sys
TABLE_NAME = 'movielens'
'''
Perform analysis on the given data file and schema, creating the
table first if needed and uploading the data.
'''
def main(data_file, schema_file):
rows = json.loads(open(data_file).read())
schema = json.loads(open(schema_file).read())
api = veritable.connect()
if not api.table_exists(TABLE_NAME):
print 'Creating table'
table = api.create_table(TABLE_NAME)
else:
print 'Getting table'
table = api.get_table(TABLE_NAME)
print 'Uploading rows'
table.batch_upload_rows(rows)
print 'Creating analysis'
analysis = table.create_analysis(schema)
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
{
"content_hash": "4523ca44619bd800ba20fbb85bbf59d8",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 69,
"avg_line_length": 23.235294117647058,
"alnum_prop": 0.6544303797468355,
"repo_name": "rkomartin/user-recs-example",
"id": "95b12f4511b59348d8b96466b8ff6b001104ff9c",
"size": "790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/run_analysis.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
WSGI config for spitballingHere project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spitballingHere.settings")
application = get_wsgi_application()
|
{
"content_hash": "d7955d9c7aacb660f30308e90a6b2c76",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.4375,
"alnum_prop": 0.7788697788697788,
"repo_name": "mikelese/spitballingHere",
"id": "038e204c4cee89aaccf8711eaa2b988d1a2c1592",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spitballingHere/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1396"
},
{
"name": "HTML",
"bytes": "50080"
},
{
"name": "Python",
"bytes": "20134"
}
],
"symlink_target": ""
}
|
trainImgFpath = "../data/plat-indo-train/master.jpg"
resultFlatten = "../xprmt/ocr/flattened_images.txt"
resultClass = "../xprmt/ocr/classifications.txt"
|
{
"content_hash": "01d3e76d33e9430d9fe11bb920e5d056",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 52,
"avg_line_length": 51,
"alnum_prop": 0.7516339869281046,
"repo_name": "Anggunasr/plat",
"id": "573216daaeb308a8c27fae22fdc32fe884da4cce",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ocr/train_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70812"
}
],
"symlink_target": ""
}
|
"""Helpers shared by cloudstorage_stub and cloudstorage_api."""
__all__ = ['CS_XML_NS',
'CSFileStat',
'dt_str_to_posix',
'LOCAL_API_HOST',
'local_run',
'get_access_token',
'get_metadata',
'GCSFileStat',
'http_time_to_posix',
'memory_usage',
'posix_time_to_http',
'posix_to_dt_str',
'set_access_token',
'validate_options',
'validate_bucket_name',
'validate_bucket_path',
'validate_file_path',
]
import calendar
import datetime
from email import utils as email_utils
import logging
import os
import re
try:
from google.appengine.api import runtime
except ImportError:
from google.appengine.api import runtime
_GCS_BUCKET_REGEX_BASE = r'[a-z0-9\.\-_]{3,63}'
_GCS_BUCKET_REGEX = re.compile(_GCS_BUCKET_REGEX_BASE + r'$')
_GCS_BUCKET_PATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'$')
_GCS_FULLPATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'/.*')
_GCS_OPTIONS = ('x-goog-acl',
'x-goog-meta-')
CS_XML_NS = 'http://doc.s3.amazonaws.com/2006-03-01'
LOCAL_API_HOST = 'gcs-magicstring.appspot.com'
_access_token = ''
def set_access_token(access_token):
"""Set the shared access token to authenticate with Google Cloud Storage.
When set, the library will always attempt to communicate with the
real Google Cloud Storage with this token even when running on dev appserver.
Note the token could expire so it's up to you to renew it.
When absent, the library will automatically request and refresh a token
on appserver, or when on dev appserver, talk to a Google Cloud Storage
stub.
Args:
access_token: you can get one by running 'gsutil -d ls' and copying the
str after 'Bearer'.
"""
global _access_token
_access_token = access_token
def get_access_token():
"""Returns the shared access token."""
return _access_token
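# Illustrative sketch (the token below is a placeholder, not a real credential):
# pin all Google Cloud Storage calls to the real service, even on dev appserver,
# by installing a token obtained from 'gsutil -d ls'.
#   set_access_token('ya29.EXAMPLE_PLACEHOLDER_TOKEN')
#   assert get_access_token() == 'ya29.EXAMPLE_PLACEHOLDER_TOKEN'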
class GCSFileStat(object):
"""Container for GCS file stat."""
def __init__(self,
filename,
st_size,
etag,
st_ctime,
content_type=None,
metadata=None):
"""Initialize.
Args:
filename: a Google Cloud Storage filename of form '/bucket/filename'.
st_size: file size in bytes. long compatible.
etag: hex digest of the md5 hash of the file's content. str.
st_ctime: posix file creation time. float compatible.
content_type: content type. str.
metadata: a str->str dict of user specified metadata from the
x-goog-meta header, e.g. {'x-goog-meta-foo': 'foo'}.
"""
self.filename = filename
self.st_size = long(st_size)
self.st_ctime = float(st_ctime)
if etag[0] == '"' and etag[-1] == '"':
etag = etag[1:-1]
self.etag = etag
self.content_type = content_type
self.metadata = metadata
def __repr__(self):
return (
'(filename: %(filename)s, st_size: %(st_size)s, '
'st_ctime: %(st_ctime)s, etag: %(etag)s, '
'content_type: %(content_type)s, '
'metadata: %(metadata)s)' %
dict(filename=self.filename,
st_size=self.st_size,
st_ctime=self.st_ctime,
etag=self.etag,
content_type=self.content_type,
metadata=self.metadata))
CSFileStat = GCSFileStat
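# Illustrative construction (all values hypothetical): a 1 KiB object whose
# etag is the hex md5 digest reported by GCS, created at posix time 1365726147.
#   stat = GCSFileStat('/my-bucket/some/file.txt', 1024,
#                      'd41d8cd98f00b204e9800998ecf8427e', 1365726147.0,
#                      content_type='text/plain')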
def get_metadata(headers):
"""Get user defined metadata from HTTP response headers."""
return dict((k, v) for k, v in headers.iteritems()
if k.startswith('x-goog-meta-'))
def validate_bucket_name(name):
"""Validate a Google Storage bucket name.
Args:
name: a Google Storage bucket name with no prefix or suffix.
Raises:
ValueError: if name is invalid.
"""
_validate_path(name)
if not _GCS_BUCKET_REGEX.match(name):
raise ValueError('Bucket should be 3-63 characters long using only a-z,'
'0-9, underscore, dash or dot but got %s' % name)
def validate_bucket_path(path):
"""Validate a Google Cloud Storage bucket path.
Args:
path: a Google Storage bucket path. It should have form '/bucket'.
Raises:
ValueError: if path is invalid.
"""
_validate_path(path)
if not _GCS_BUCKET_PATH_REGEX.match(path):
raise ValueError('Bucket should have format /bucket '
'but got %s' % path)
def validate_file_path(path):
"""Validate a Google Cloud Storage file path.
Args:
path: a Google Storage file path. It should have form '/bucket/filename'.
Raises:
ValueError: if path is invalid.
"""
_validate_path(path)
if not _GCS_FULLPATH_REGEX.match(path):
raise ValueError('Path should have format /bucket/filename '
'but got %s' % path)
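# Quick examples with an illustrative bucket name:
#   validate_bucket_path('/my-bucket')                # passes
#   validate_file_path('/my-bucket/some/file.txt')    # passes
#   validate_file_path('/my-bucket')                  # raises ValueError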
def _validate_path(path):
"""Basic validation of Google Storage paths.
Args:
path: a Google Storage path. It should have form '/bucket/filename'
or '/bucket'.
Raises:
ValueError: if path is invalid.
TypeError: if path is not of type basestring.
"""
if not path:
raise ValueError('Path is empty')
if not isinstance(path, basestring):
raise TypeError('Path should be a string but is %s (%s).' %
(path.__class__, path))
def validate_options(options):
"""Validate Google Cloud Storage options.
Args:
options: a str->basestring dict of options to pass to Google Cloud Storage.
Raises:
ValueError: if option is not supported.
TypeError: if option is not of type str or value of an option
is not of type basestring.
"""
if not options:
return
for k, v in options.iteritems():
if not isinstance(k, str):
raise TypeError('option %r should be a str.' % k)
if not any(k.startswith(valid) for valid in _GCS_OPTIONS):
raise ValueError('option %s is not supported.' % k)
if not isinstance(v, basestring):
raise TypeError('value %r for option %s should be of type basestring.' %
(v, k))
def http_time_to_posix(http_time):
"""Convert HTTP time format to posix time.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
for http time format.
Args:
http_time: time in RFC 2616 format. e.g.
"Mon, 20 Nov 1995 19:12:08 GMT".
Returns:
A float of secs from unix epoch.
"""
if http_time is not None:
return email_utils.mktime_tz(email_utils.parsedate_tz(http_time))
def posix_time_to_http(posix_time):
"""Convert posix time to HTML header time format.
Args:
posix_time: unix time.
Returns:
A datetime str in RFC 2616 format.
"""
if posix_time:
return email_utils.formatdate(posix_time, usegmt=True)
_DT_FORMAT = '%Y-%m-%dT%H:%M:%S'
def dt_str_to_posix(dt_str):
"""format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC.
"""
parsable, _ = dt_str.split('.')
dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
return calendar.timegm(dt.utctimetuple())
def posix_to_dt_str(posix):
"""Reverse of str_to_datetime.
This is used by GCS stub to generate GET bucket XML response.
Args:
posix: A float of secs from unix epoch.
Returns:
A datetime str.
"""
dt = datetime.datetime.utcfromtimestamp(posix)
dt_str = dt.strftime(_DT_FORMAT)
return dt_str + '.000Z'
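# Round-trip sketch (values computed by hand, shown only for illustration):
#   dt_str_to_posix('2013-04-12T00:22:27.978Z')  # -> 1365726147 (fraction dropped)
#   posix_to_dt_str(1365726147)                  # -> '2013-04-12T00:22:27.000Z'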
def local_run():
"""Whether running in dev appserver."""
return ('SERVER_SOFTWARE' not in os.environ or
os.environ['SERVER_SOFTWARE'].startswith('Development'))
def memory_usage(method):
"""Log memory usage before and after a method."""
def wrapper(*args, **kwargs):
logging.info('Memory before method %s is %s.',
method.__name__, runtime.memory_usage().current())
result = method(*args, **kwargs)
logging.info('Memory after method %s is %s',
method.__name__, runtime.memory_usage().current())
return result
return wrapper
|
{
"content_hash": "88fc4adb7f8735860dae23dd2818ff81",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 79,
"avg_line_length": 27.574257425742573,
"alnum_prop": 0.6330341113105925,
"repo_name": "nicko96/Chrome-Infra",
"id": "eb7a214bc1a4b31e7270bc6c7b8d4f6838cf6082",
"size": "8406",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "appengine/chromium_build_logs/third_party/cloudstorage/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "100398"
},
{
"name": "Go",
"bytes": "648467"
},
{
"name": "HTML",
"bytes": "7323317"
},
{
"name": "JavaScript",
"bytes": "913960"
},
{
"name": "Makefile",
"bytes": "11281"
},
{
"name": "Protocol Buffer",
"bytes": "2730"
},
{
"name": "Python",
"bytes": "4034630"
},
{
"name": "Shell",
"bytes": "21687"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import time
import logging
import logging.config
import json
import cv2
import numpy as np
def time_this(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
argsStr = unicode(args)[:10]
kwStr = unicode(kw)[:10]
print("%r (%10r, %10r) %2.2f sec" % \
(method.__name__, argsStr, kwStr, te - ts))
return result
return timed
def dir_methods_help(obj, spacing=10, collapse=1):
""" Prints methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [m for m in dir(obj) if callable(getattr(obj, m))]
strProc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print("\n".join(["%s %s" %
(m, strProc(str(getattr(obj, m).__doc__)))
for m in methodList]
))
def polyline(img, points, color):
shape = points.shape
if len(shape) != 2 and (len(shape) > 1 and shape[1] != 2):
print('incoming points needs to be np array, in shape (-1,2)')
return
xp = None
yp = None
for x, y in points:
if xp is None:
xp = x
yp = y
else:
cv2.line(img,(xp, yp), (x, y), color)
xp = x
yp = y
print(x, y)
def setup_logging( \
default_path='../config/log_config.json', \
default_level=logging.INFO, \
env_key='LOG_CFG' \
):
config_file = default_path
if os.path.exists(config_file):
with file(config_file, 'r') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
|
{
"content_hash": "d07681878c7b7433f34101f51329e34d",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 82,
"avg_line_length": 25.942857142857143,
"alnum_prop": 0.5501101321585903,
"repo_name": "neilhan/python_cv_learning",
"id": "06a732d8b089be2d7f4e3170f579c2d62f837ec8",
"size": "1863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mylib/ava/utl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "69217"
}
],
"symlink_target": ""
}
|
import json
import os
import sys
import pickle
from flask import Flask, request, make_response
import pystache
from resultslogger.constants import ResultLoggerConstants
from resultslogger.experimentlogger import ExperimentLogger
from resultslogger.experimentqueue import ExperimentQueue, CsvExperimentQueue
from resultslogger.bayesoptqueue import BayesianOptimizedExperimentQueue
def load_template(relative_filename: str):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), relative_filename)) as f:
return pystache.parse(f.read())
class ResultsLoggerServer:
PAGE_TEMPLATE = load_template("resources/page.mustache")
def __init__(self, experiment_name: str, queue: ExperimentQueue,
autosave_path: str='.', experiment_logger: ExperimentLogger=None,
allow_unsolicited_results: bool=True):
"""
:param autosave_path: the path where to autosave the results
:param lease_timeout_secs: the number of secs after which a lease times out. Defaults to 2 days
:param allow_unsolicited_results: allow clients to report results about experiments that are not in the queue/have been leased.
"""
self.__app = Flask(__name__)
self.__queue = queue
self.__renderer = pystache.Renderer()
self.__experiment_name = experiment_name
@self.__app.route(ResultLoggerConstants.ROUTE_LEASE_EXPERIMENT, methods=['POST'])
def lease_next_experiment():
client = request.form[ResultLoggerConstants.FIELD_CLIENT]
next_lease = self.__queue.lease_new(client)
if next_lease is None:
return ResultLoggerConstants.END
params, experiment_id = next_lease
return json.dumps(dict(parameters=params, experiment_id=experiment_id))
@self.__app.route(ResultLoggerConstants.ROUTE_STORE_EXPERIMENT, methods=['POST'])
def store_experiment():
client = request.form[ResultLoggerConstants.FIELD_CLIENT]
experiment_parameters = request.form[ResultLoggerConstants.FIELD_PARAMETERS]
results = request.form[ResultLoggerConstants.FIELD_RESULTS]
experiment_id = request.form[ResultLoggerConstants.FIELD_EXPERIMENT_ID]
parameters = json.loads(experiment_parameters)
results = json.loads(results)
self.__logger.log_experiment(parameters, results)
experiment_id = int(experiment_id)
if experiment_id == -1 and not self.__allow_unsolicited_results:
assert False, "Unsolicited experiment returned"
else:
self.__queue.complete(experiment_id, parameters, client,
results[ResultLoggerConstants.BASE_RESULT_FIELD] if ResultLoggerConstants.BASE_RESULT_FIELD in results else 0.)
self.autosave()
return ResultLoggerConstants.OK
@self.__app.route(ResultLoggerConstants.ROUTE_EXPERIMENTS_ALL_RESULTS)
def show_results_html():
return self.__renderer.render(self.PAGE_TEMPLATE,
{'title': 'All Results',
'experiment_name' : self.__experiment_name,
'body': self.__pandas_to_html_table(self.__logger.all_results),
'summary_links': self.__get_groupby_links(set()),
'in_results': True})
@self.__app.route(ResultLoggerConstants.ROUTE_CSV_DUMP)
def download_csv():
response = make_response(self.__logger.all_results.to_csv())
response.headers['Content-Description'] = 'File Transfer'
response.headers['Cache-Control'] = 'no-cache'
response.headers['Content-Type'] = 'application/octet-stream'
response.headers['Content-Disposition'] = 'attachment; filename=%s' % self.__experiment_name+'.csv'
return response
@self.__app.route(ResultLoggerConstants.ROUTE_EXPERIMENTS_SUMMARY)
def show_summary_html():
group_by_values = request.args.get(ResultLoggerConstants.FIELD_GROUPBY).split(',')
return self.__renderer.render(self.PAGE_TEMPLATE,
{'title': 'Results Summary',
'experiment_name': self.__experiment_name,
'body': self.__pandas_to_html_table(self.__logger.all_results.groupby(group_by_values).mean()),
'summary_links': self.__get_groupby_links(set(group_by_values)),
'in_summary':True
})
@self.__app.route(ResultLoggerConstants.ROUTE_EXPERIMENTS_QUEUE)
def experiment_queue_html():
pct_completed = self.__queue.completed_percent * 100
pct_leased = self.__queue.leased_percent * 100
return self.__renderer.render(self.PAGE_TEMPLATE,
{'title': 'Experiments Queue',
'experiment_name' : self.__experiment_name,
'body': self.__pandas_to_html_table(self.__queue.all_experiments),
'progress': pct_completed + pct_leased > 0,
'progress_complete': int(pct_completed) if pct_completed > 0 else False,
'progress_leased': int(pct_leased) if pct_leased > 0 else False,
'summary_links': self.__get_groupby_links(set()),
'in_queue': True})
self.__autosave_path = autosave_path
self.__allow_unsolicited_results = allow_unsolicited_results
if experiment_logger is None:
self.__logger = ExperimentLogger(self.__queue.experiment_parameters, [])
else:
self.__logger = experiment_logger
def run(self):
self.__app.run(host='0.0.0.0')
def __pandas_to_html_table(self, frame):
return frame.to_html(classes=['table', 'table-striped', 'table-condensed', 'table-hover']).replace('border="1"', 'border="0"')
def __get_groupby_links(self, current_parameters:set)->list:
"""The group, by links are incremental, toggle-like"""
def get_parameters(name_of_param)-> tuple:
active = name_of_param in current_parameters
if active:
params_to_use = current_parameters - {name_of_param}
if len(params_to_use) > 0:
return ','.join(params_to_use)
else:
return ','.join(current_parameters)
else:
return ','.join(current_parameters | {name_of_param})
return [{'name': n, 'link': get_parameters(n), 'active': n in current_parameters} for n in self.__queue.experiment_parameters]
def autosave(self):
self.__logger.save_results_csv(os.path.join(self.__autosave_path, self.__experiment_name + "_results.csv"))
with open(os.path.join(self.__autosave_path, self.__experiment_name + ".pkl"), 'wb') as f:
pickle.dump((self.__queue, self.__logger), f, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load(filename: str, experiment_name: str):
"""
Load previously saved experiment data and progress.
"""
with open(filename, 'rb') as f:
queue, experiment_logger = pickle.load(f)
return ResultsLoggerServer(experiment_name, queue=queue, experiment_logger=experiment_logger)
if __name__ == "__main__":
if len(sys.argv) != 4:
print("Usage <experimentName> csv <listOfExperiments.csv>")
print("Usage <experimentName> bayesianOpt <inputSpaceParameters.json>")
sys.exit(-1)
experiment_name = sys.argv[1]
queue_type = sys.argv[2]
if queue_type == 'csv':
queue = CsvExperimentQueue(sys.argv[3])
elif queue_type == 'bayesianOpt':
queue = BayesianOptimizedExperimentQueue(sys.argv[3])
else:
raise Exception('Unrecognized queue type %s' % queue_type)
list_of_experiments_csv_path = sys.argv[2]
logger = ResultsLoggerServer(experiment_name, queue)
logger.run()
|
{
"content_hash": "187456a44c5b3d072ce3a8917ffe5923",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 138,
"avg_line_length": 49.2046783625731,
"alnum_prop": 0.5909198954124079,
"repo_name": "mallamanis/resultslogger",
"id": "2b0f183591b2509a04aae528b906b6b240773b96",
"size": "8414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resultslogger/server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3056"
},
{
"name": "Python",
"bytes": "27209"
}
],
"symlink_target": ""
}
|
"""
Generate summary statistics using Facets.
Facets must be installed. See: https://github.com/PAIR-code/facets
In particular, see the Python code here:
https://github.com/PAIR-code/facets/tree/master/facets_overview/python
"""
from feature_statistics_generator import ProtoFromTfRecordFiles
DATASET_PATH = "/tmp/adult.tfrecords"
OUTPUT_PATH = "/tmp/adult_summary.bin"
DATASET_NAME = "uci_census"
result = ProtoFromTfRecordFiles(
[{"name": DATASET_NAME, "path": DATASET_PATH}],
max_entries=1000000)
with open(OUTPUT_PATH, "w") as fout:
fout.write(result.SerializeToString())
|
{
"content_hash": "660334ef328dfc63945ea0de5b2b72f9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 31.05263157894737,
"alnum_prop": 0.752542372881356,
"repo_name": "brain-research/data-linter",
"id": "38ff8b654ac5cfe3f5c925395203cacfcbd56b02",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/summarize_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "148839"
},
{
"name": "Shell",
"bytes": "1036"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
import SuperDiffer.routes
|
{
"content_hash": "a27cc6ed169892da340bc747651e04a8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 39,
"avg_line_length": 17,
"alnum_prop": 0.7647058823529411,
"repo_name": "gpaOliveira/SuperDiffer",
"id": "b7a26c0a9ed6e74ca6b51fb114621a2fa3bb1357",
"size": "170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SuperDiffer/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6746"
},
{
"name": "HTML",
"bytes": "1886"
},
{
"name": "Python",
"bytes": "48948"
},
{
"name": "Shell",
"bytes": "1048"
}
],
"symlink_target": ""
}
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('layout.html')
if __name__ == '__main__':
app.run()
|
{
"content_hash": "c56ce9484bc7ffe5b6ac1fad1d5973ff",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 41,
"avg_line_length": 15.416666666666666,
"alnum_prop": 0.6108108108108108,
"repo_name": "hsrob/drink-league",
"id": "6a833c4bb4e08cfa4a011c82f36e7712c17b5878",
"size": "185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WhiskeyFlask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9453"
},
{
"name": "HTML",
"bytes": "8522"
},
{
"name": "JavaScript",
"bytes": "20609"
},
{
"name": "Python",
"bytes": "185"
}
],
"symlink_target": ""
}
|
import argparse
class sumeet_class(object):
def __init__(self, name, balance, bank_name):
self.name = name
self.balance = balance
self.bank_name = bank_name
def withdraw(self, amount):
if amount > self.balance:
raise RuntimeError("Amount greater than available balance")
self.balance -= amount
return self.balance
def deposit(self, amount):
self.balance += amount
return self.balance
@property
def name_return(self):
return self.name
def balance_return(self):
return self.balance
def bank_return(self):
return self.bank_name
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("-name", "--name", choices=["sumeet"],
help="name of the person")
parser.add_argument("-balance", "--balance", choices=[10, 20, 30],
type=int, help = "Initial balance")
parser.add_argument("-bank1", choices=["JPM", "HDFC", "AXIS"], dest="bank_name",
help="name of the person", default=False)
args = parser.parse_args() if not argv else parser.parse_args(argv)
print(args)
sumeet_obj = sumeet_class(**vars(args))
print(sumeet_obj.withdraw(10))
#print("Bank_name==============="+ args.bank_name)
if not sumeet_obj.bank_return():
print("noooo")
else:
print("Yessssss")
if __name__ == "__main__":
main()
s = "Hello {name} how are you? which {class1} you study in?"
print(s)
m = s.format(name = "Sumeet" , class1 = "10th")
print(m)
#}
|
{
"content_hash": "311d20bf0d43ddca3f802ac1f34cb00f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 88,
"avg_line_length": 28.3,
"alnum_prop": 0.5530035335689046,
"repo_name": "sum-coderepo/spark-scala",
"id": "010fd1df10b70f9854659070d4be68faeeace533",
"size": "1743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/SamplePython.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4371"
},
{
"name": "Java",
"bytes": "3606"
},
{
"name": "Python",
"bytes": "9301"
},
{
"name": "Scala",
"bytes": "100906"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
}
|
from neutron import manager
from neutron.tests.unit.bigswitch import test_base
from neutron.tests.unit import test_extension_security_group as test_sg
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
class RestProxySecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase,
test_base.BigSwitchTestBase):
plugin_str = ('%s.NeutronRestProxyV2' %
test_base.RESTPROXY_PKG_PATH)
def setUp(self, plugin=None):
test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
self.setup_config_files()
self.setup_patches()
self._attribute_map_bk_ = {}
super(RestProxySecurityGroupsTestCase, self).setUp(self.plugin_str)
plugin = manager.NeutronManager.get_plugin()
self.notifier = plugin.notifier
self.rpc = plugin.endpoints[0]
self.startHttpPatch()
class TestSecServerRpcCallBack(test_sg_rpc.SGServerRpcCallBackTestCase,
RestProxySecurityGroupsTestCase):
pass
class TestSecurityGroupsMixin(test_sg.TestSecurityGroups,
test_sg_rpc.SGNotificationTestMixin,
RestProxySecurityGroupsTestCase):
pass
|
{
"content_hash": "4c5b187446a715958d123c8d49657ad8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 39.125,
"alnum_prop": 0.6693290734824281,
"repo_name": "uni2u/neutron",
"id": "409e9ce4f4542f528c4b5360f9760216227d116a",
"size": "1888",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/bigswitch/test_security_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from osmnodepbf import osmnodepbf
import os
parser = osmnodepbf.Parser("data/germany-latest.osm.pbf")
places = parser.parse({"place": {
"suburb", "borough", "quarter", "city", "neighbourhood", "city_block", "town", "village", "hamlet",
"isolated_dwelling", "farm", "allotments"}}, refresh=True)
f = file("data/germany-places.csv", "w")
for p in places:
for t in p["tag"]:
if t.keys()[0] == "name":
nm = t["name"]
break
# ideally, filter out commas from names
f.write(nm + "," + str(p["lon"]) + "," + str(p["lat"]) + os.linesep)
f.close()
|
{
"content_hash": "93b4c65cf16b6abaabf1ae08ba30936c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 103,
"avg_line_length": 28.38095238095238,
"alnum_prop": 0.5855704697986577,
"repo_name": "gaetjen/place_names",
"id": "f22b272294d8292994f13ee8a92a546660b8e6b8",
"size": "596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main_script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "15801154"
},
{
"name": "Python",
"bytes": "596"
}
],
"symlink_target": ""
}
|
""" """
from .file import FileUrl
class Hdf5Url(FileUrl):
"""URL that references an HDF5 file"""
match_priority = FileUrl.match_priority-5
def __init__(self, url=None, downloader=None, **kwargs):
super().__init__(url, downloader, **kwargs)
@classmethod
def _match(cls, url, **kwargs):
return url.resource_format in ('h5', 'hdf5')
@property
def target_format(self):
return 'h5'
def list(self, list_self=False):
import h5py
with h5py.File(str(self.fspath)) as f:
return list(f.keys())
@property
def generator(self):
"""
Return the generator for this URL, if the rowgenerator package is installed.
:return: A row generator object.
"""
from rowgenerators.core import get_generator
return get_generator(self, source_url=self)
|
{
"content_hash": "94c0e37f10d51071b7f368e303c84724",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 23.594594594594593,
"alnum_prop": 0.6036655211912944,
"repo_name": "CivicKnowledge/rowgenerators",
"id": "534a6a0491873f6e94af75a341aa5c72338f6c9d",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rowgenerators/appurl/file/hdf5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "823"
},
{
"name": "Python",
"bytes": "109674"
}
],
"symlink_target": ""
}
|
import pyscreenshot as ImageGrab
# part of the screen
im=ImageGrab.grab(bbox=(10,10,500,500))
im.show()
|
{
"content_hash": "ef03c92f52256afc70b78f826f8e3a17",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 21,
"alnum_prop": 0.7523809523809524,
"repo_name": "Faianca/Anime-Tv-shows-Scrapper",
"id": "5ad53689dc3af5a79ee9c6762f5d0b6161971043",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gui_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "77141"
},
{
"name": "HTML",
"bytes": "10450"
},
{
"name": "JavaScript",
"bytes": "980561"
},
{
"name": "Python",
"bytes": "38655"
}
],
"symlink_target": ""
}
|
import oanda.oanda_common.view
from oanda.position.view import print_positions_map
from oanda.order.view import print_orders_map
from oanda.trade.view import print_trades_map
def update_attribute(dest, name, value):
"""
Set dest[name] to value if it exists and is not None
"""
if hasattr(dest, name) and \
getattr(dest, name) is not None:
setattr(dest, name, value)
class Account(object):
"""
An Account object is a wrapper for the Account entities fetched from the
v20 REST API. It is used for caching and updating Account state.
"""
def __init__(self, account, transaction_cache_depth=100):
"""
Create a new Account wrapper
Args:
account: a v20.account.Account fetched from the server
"""
#
# The collection of Trades open in the Account
#
self.trades = {}
for trade in getattr(account, "trades", []):
self.trades[trade.id] = trade
setattr(account, "trades", None)
#
# The collection of Orders pending in the Account
#
self.orders = {}
for order in getattr(account, "orders", []):
self.orders[order.id] = order
setattr(account, "orders", None)
#
# Map from OrderID -> OrderState. Order State is tracked for
# TrailingStopLoss orders, and includes the trailingStopValue
# and triggerDistance
#
self.order_states = {}
#
# The collection of positions open in the account
#
self.positions = {}
for position in getattr(account, "positions", []):
self.positions[position.instrument] = position
setattr(account, "positions", None)
#
# Keep a cache of the last self.transaction_cache_depth Transactions
#
self.transaction_cache_depth = transaction_cache_depth
self.transactions = []
#
# The Account details
#
self.details = account
def dump(self):
"""
Print out the whole Account state
"""
oanda.oanda_common.view.print_entity(
self.details,
title=self.details.title()
)
print("")
print_positions_map(self.positions)
print_orders_map(self.orders)
print_trades_map(self.trades)
def trade_get(self, id):
"""
Fetch an open Trade
Args:
id: The ID of the Trade to fetch
Returns:
The Trade with the matching ID if it exists, None otherwise
"""
return self.trades.get(id, None)
def order_get(self, id):
"""
Fetch a pending Order
Args:
id: The ID of the Order to fetch
Returns:
The Order with the matching ID if it exists, None otherwise
"""
return self.orders.get(id, None)
def position_get(self, instrument):
"""
Fetch an open Position
Args:
instrument: The instrument of the Position to fetch
Returns:
The Position with the matching instrument if it exists, None
otherwise
"""
return self.positions.get(instrument, None)
def apply_changes(self, changes):
"""
Update the Account state with a set of changes provided by the server.
Args:
changes: a v20.account.AccountChanges object representing the
changes that have been made to the Account
"""
for order in changes.ordersCreated:
print("[Order Created] {}".format(order.title()))
self.orders[order.id] = order
for order in changes.ordersCancelled:
print("[Order Cancelled] {}".format(order.title()))
self.orders.pop(order.id, None)
for order in changes.ordersFilled:
print("[Order Filled] {}".format(order.title()))
self.orders.pop(order.id, None)
for order in changes.ordersTriggered:
print("[Order Triggered] {}".format(order.title()))
self.orders.pop(order.id, None)
for trade in changes.tradesOpened:
print("[Trade Opened] {}".format(trade.title()))
self.trades[trade.id] = trade
for trade in changes.tradesReduced:
print("[Trade Reduced] {}".format(trade.title()))
self.trades[trade.id] = trade
for trade in changes.tradesClosed:
print("[Trade Closed] {}".format(trade.title()))
self.trades.pop(trade.id, None)
for position in changes.positions:
print("[Position Changed] {}".format(position.instrument))
self.positions[position.instrument] = position
for transaction in changes.transactions:
print("[Transaction] {}".format(transaction.title()))
self.transactions.append(transaction)
if len(self.transactions) > self.transaction_cache_depth:
self.transactions.pop(0)
def apply_trade_states(self, trade_states):
"""
Update state for open Trades
Args:
trade_states: A list of v20.trade.CalculatedTradeState objects
representing changes to the state of open Trades
"""
for trade_state in trade_states:
trade = self.trade_get(trade_state.id)
if trade is None:
continue
for field in trade_state.fields():
setattr(trade, field.name, field.value)
def apply_position_states(self, position_states):
"""
Update state for all Positions
Args:
position_states: A list of v20.trade.CalculatedPositionState objects
representing changes to the state of open Position
"""
for position_state in position_states:
position = self.position_get(position_state.instrument)
if position is None:
continue
position.unrealizedPL = position_state.netUnrealizedPL
position.long.unrealizedPL = position_state.longUnrealizedPL
position.short.unrealizedPL = position_state.shortUnrealizedPL
def apply_order_states(self, order_states):
"""
Update state for all Orders
Args:
order_states: A list of v20.order.DynamicOrderState objects
representing changes to the state of pending Orders
"""
for order_state in order_states:
order = self.order_get(order_state.id)
if order is None:
continue
order.trailingStopValue = order_state.trailingStopValue
self.order_states[order.id] = order_state
def apply_state(self, state):
"""
Update the state of an Account
Args:
state: A v20.account.AccountState object representing changes to
the Account's trades, positions, orders and state.
"""
#
# Update Account details from the state
#
for field in state.fields():
update_attribute(self.details, field.name, field.value)
self.apply_trade_states(state.trades)
self.apply_position_states(state.positions)
self.apply_order_states(state.orders)
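# Illustrative usage, assuming `server_account` is a v20.account.Account that
# has already been fetched from the REST API (the fetch itself is not shown):
#   account = Account(server_account, transaction_cache_depth=50)
#   account.dump()
#   trade = account.trade_get("12345")          # None if no such open Trade
#   position = account.position_get("EUR_USD")  # None if no open Position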
|
{
"content_hash": "8bd5293857e089f55250246238b4fd66",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 80,
"avg_line_length": 28.455938697318008,
"alnum_prop": 0.5812575737175172,
"repo_name": "JavierGarciaD/AlgoTrader",
"id": "8a09167f8fbe3443e8c6380bc6382a44f4367781",
"size": "7427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algotrader/brokers_handlers/oanda/account/account.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2052"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from os import getpid
from functools import partial
from bcc import BPF
from bcc.containers import filter_by_containers
import errno
import argparse
from time import strftime
# arguments
examples = """examples:
./capable # trace capability checks
./capable -v # verbose: include non-audit checks
./capable -p 181 # only trace PID 181
./capable -K # add kernel stacks to trace
./capable -U # add user-space stacks to trace
./capable -x # extra fields: show TID and INSETID columns
./capable --unique # don't repeat stacks for the same pid or cgroup
./capable --cgroupmap mappath # only trace cgroups in this BPF map
./capable --mntnsmap mappath # only trace mount namespaces in the map
"""
parser = argparse.ArgumentParser(
description="Trace security capability checks",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-v", "--verbose", action="store_true",
help="include non-audit checks")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-K", "--kernel-stack", action="store_true",
help="output kernel stack trace")
parser.add_argument("-U", "--user-stack", action="store_true",
help="output user stack trace")
parser.add_argument("-x", "--extra", action="store_true",
help="show extra fields in TID and INSETID columns")
parser.add_argument("--cgroupmap",
help="trace cgroups in this BPF map only")
parser.add_argument("--mntnsmap",
help="trace mount namespaces in this BPF map only")
parser.add_argument("--unique", action="store_true",
help="don't repeat stacks for the same pid or cgroup")
args = parser.parse_args()
debug = 0
# capabilities to names, generated from (and will need updating):
# awk '/^#define.CAP_.*[0-9]$/ { print " " $3 ": \"" $2 "\"," }' \
# include/uapi/linux/capability.h
capabilities = {
0: "CAP_CHOWN",
1: "CAP_DAC_OVERRIDE",
2: "CAP_DAC_READ_SEARCH",
3: "CAP_FOWNER",
4: "CAP_FSETID",
5: "CAP_KILL",
6: "CAP_SETGID",
7: "CAP_SETUID",
8: "CAP_SETPCAP",
9: "CAP_LINUX_IMMUTABLE",
10: "CAP_NET_BIND_SERVICE",
11: "CAP_NET_BROADCAST",
12: "CAP_NET_ADMIN",
13: "CAP_NET_RAW",
14: "CAP_IPC_LOCK",
15: "CAP_IPC_OWNER",
16: "CAP_SYS_MODULE",
17: "CAP_SYS_RAWIO",
18: "CAP_SYS_CHROOT",
19: "CAP_SYS_PTRACE",
20: "CAP_SYS_PACCT",
21: "CAP_SYS_ADMIN",
22: "CAP_SYS_BOOT",
23: "CAP_SYS_NICE",
24: "CAP_SYS_RESOURCE",
25: "CAP_SYS_TIME",
26: "CAP_SYS_TTY_CONFIG",
27: "CAP_MKNOD",
28: "CAP_LEASE",
29: "CAP_AUDIT_WRITE",
30: "CAP_AUDIT_CONTROL",
31: "CAP_SETFCAP",
32: "CAP_MAC_OVERRIDE",
33: "CAP_MAC_ADMIN",
34: "CAP_SYSLOG",
35: "CAP_WAKE_ALARM",
36: "CAP_BLOCK_SUSPEND",
37: "CAP_AUDIT_READ",
38: "CAP_PERFMON",
39: "CAP_BPF",
40: "CAP_CHECKPOINT_RESTORE",
}
class Enum(set):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
# Stack trace types
StackType = Enum(("Kernel", "User",))
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/security.h>
struct data_t {
u32 tgid;
u32 pid;
u32 uid;
int cap;
int audit;
int insetid;
char comm[TASK_COMM_LEN];
#ifdef KERNEL_STACKS
int kernel_stack_id;
#endif
#ifdef USER_STACKS
int user_stack_id;
#endif
};
BPF_PERF_OUTPUT(events);
#if UNIQUESET
struct repeat_t {
int cap;
u32 tgid;
#if CGROUPSET
u64 cgroupid;
#endif
#ifdef KERNEL_STACKS
int kernel_stack_id;
#endif
#ifdef USER_STACKS
int user_stack_id;
#endif
};
BPF_HASH(seen, struct repeat_t, u64);
#endif
#if defined(USER_STACKS) || defined(KERNEL_STACKS)
BPF_STACK_TRACE(stacks, 2048);
#endif
int kprobe__cap_capable(struct pt_regs *ctx, const struct cred *cred,
struct user_namespace *targ_ns, int cap, int cap_opt)
{
u64 __pid_tgid = bpf_get_current_pid_tgid();
u32 tgid = __pid_tgid >> 32;
u32 pid = __pid_tgid;
int audit;
int insetid;
#ifdef CAP_OPT_NONE
audit = (cap_opt & 0b10) == 0;
insetid = (cap_opt & 0b100) != 0;
#else
audit = cap_opt;
insetid = -1;
#endif
FILTER1
FILTER2
FILTER3
if (container_should_be_filtered()) {
return 0;
}
u32 uid = bpf_get_current_uid_gid();
struct data_t data = {};
data.tgid = tgid;
data.pid = pid;
data.uid = uid;
data.cap = cap;
data.audit = audit;
data.insetid = insetid;
#ifdef KERNEL_STACKS
data.kernel_stack_id = stacks.get_stackid(ctx, 0);
#endif
#ifdef USER_STACKS
data.user_stack_id = stacks.get_stackid(ctx, BPF_F_USER_STACK);
#endif
#if UNIQUESET
struct repeat_t repeat = {0,};
repeat.cap = cap;
#if CGROUP_ID_SET
repeat.cgroupid = bpf_get_current_cgroup_id();
#else
repeat.tgid = tgid;
#endif
#ifdef KERNEL_STACKS
repeat.kernel_stack_id = data.kernel_stack_id;
#endif
#ifdef USER_STACKS
repeat.user_stack_id = data.user_stack_id;
#endif
if (seen.lookup(&repeat) != NULL) {
return 0;
}
u64 zero = 0;
seen.update(&repeat, &zero);
#endif
bpf_get_current_comm(&data.comm, sizeof(data.comm));
events.perf_submit(ctx, &data, sizeof(data));
return 0;
};
"""
if args.pid:
bpf_text = bpf_text.replace('FILTER1',
'if (pid != %s) { return 0; }' % args.pid)
if not args.verbose:
bpf_text = bpf_text.replace('FILTER2', 'if (audit == 0) { return 0; }')
if args.kernel_stack:
bpf_text = "#define KERNEL_STACKS\n" + bpf_text
if args.user_stack:
bpf_text = "#define USER_STACKS\n" + bpf_text
bpf_text = bpf_text.replace('FILTER1', '')
bpf_text = bpf_text.replace('FILTER2', '')
bpf_text = bpf_text.replace('FILTER3',
'if (pid == %s) { return 0; }' % getpid())
bpf_text = filter_by_containers(args) + bpf_text
if args.unique:
bpf_text = bpf_text.replace('UNIQUESET', '1')
else:
bpf_text = bpf_text.replace('UNIQUESET', '0')
if debug:
print(bpf_text)
# initialize BPF
b = BPF(text=bpf_text)
# header
if args.extra:
print("%-9s %-6s %-6s %-6s %-16s %-4s %-20s %-6s %s" % (
"TIME", "UID", "PID", "TID", "COMM", "CAP", "NAME", "AUDIT", "INSETID"))
else:
print("%-9s %-6s %-6s %-16s %-4s %-20s %-6s" % (
"TIME", "UID", "PID", "COMM", "CAP", "NAME", "AUDIT"))
def stack_id_err(stack_id):
# -EFAULT in get_stackid normally means the stack-trace is not available,
# such as requesting a kernel stack trace from userspace code
return (stack_id < 0) and (stack_id != -errno.EFAULT)
def print_stack(bpf, stack_id, stack_type, tgid):
if stack_id_err(stack_id):
print(" [Missed %s Stack]" % stack_type)
return
stack = list(bpf.get_table("stacks").walk(stack_id))
for addr in stack:
print(" ", end="")
print("%s" % (bpf.sym(addr, tgid, show_module=True, show_offset=True)))
# process event
def print_event(bpf, cpu, data, size):
event = b["events"].event(data)
if event.cap in capabilities:
name = capabilities[event.cap]
else:
name = "?"
if args.extra:
print("%-9s %-6d %-6d %-6d %-16s %-4d %-20s %-6d %s" % (strftime("%H:%M:%S"),
event.uid, event.pid, event.tgid, event.comm.decode('utf-8', 'replace'),
event.cap, name, event.audit, str(event.insetid) if event.insetid != -1 else "N/A"))
else:
print("%-9s %-6d %-6d %-16s %-4d %-20s %-6d" % (strftime("%H:%M:%S"),
event.uid, event.pid, event.comm.decode('utf-8', 'replace'),
event.cap, name, event.audit))
if args.kernel_stack:
print_stack(bpf, event.kernel_stack_id, StackType.Kernel, -1)
if args.user_stack:
print_stack(bpf, event.user_stack_id, StackType.User, event.tgid)
# loop with callback to print_event
callback = partial(print_event, b)
b["events"].open_perf_buffer(callback)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
{
"content_hash": "70a5a6c9bed703c1bab8f6ad21fd091b",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 96,
"avg_line_length": 28.031141868512112,
"alnum_prop": 0.6143685964695716,
"repo_name": "iovisor/bcc",
"id": "db78de39e562f0af5beb84f56125cd8db3a2f860",
"size": "8479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/capable.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11636356"
},
{
"name": "C++",
"bytes": "916663"
},
{
"name": "CMake",
"bytes": "58262"
},
{
"name": "HTML",
"bytes": "2997"
},
{
"name": "Lua",
"bytes": "299473"
},
{
"name": "Makefile",
"bytes": "5763"
},
{
"name": "Python",
"bytes": "1449659"
},
{
"name": "Shell",
"bytes": "21840"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.forms.models import inlineformset_factory
from django.http import Http404
from django.http import HttpResponseRedirect
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView
from django.views.generic.edit import DeleteView
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from ..pypi_ui.shortcuts import render
from .decorators import user_maintains_package, user_owns_package
from .models import Package
from .models import Release
from .models import Distribution
from .forms import ReleaseForm
from .forms import DistributionUploadForm
from ..pypi_metadata.forms import METADATA_FORMS
class SingleReleaseMixin(SingleObjectMixin):
model = Release
slug_field = 'version'
slug_url_kwarg = 'version'
context_object_name = 'release'
def get_queryset(self):
return self.model.objects.filter(package__name=self.kwargs['package_name'])
class ReleaseDetails(SingleReleaseMixin, DetailView):
template_name = 'pypi_packages/release_detail.html'
class DeleteRelease(SingleReleaseMixin, DeleteView):
success_url = reverse_lazy('djangopypi2-packages-index')
@method_decorator(user_owns_package())
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(DeleteRelease, self).dispatch(request, *args, **kwargs)
class ManageRelease(SingleReleaseMixin, UpdateView):
template_name = 'pypi_packages/release_manage.html'
form_class = ReleaseForm
@method_decorator(user_maintains_package())
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(ManageRelease, self).dispatch(request, *args, **kwargs)
def _get_release(request, package_name, version):
release = get_object_or_404(Package, name=package_name).get_release(version)
if not release:
raise Http404('Version %s does not exist for %s' % (version, package_name))
return release
@user_maintains_package()
def manage_metadata(request, package_name, version):
release = _get_release(request, package_name, version)
if not release.metadata_version in METADATA_FORMS:
#TODO: Need to change this to a more meaningful error
raise Http404()
form_class = METADATA_FORMS.get(release.metadata_version)
initial = {}
multivalue = ('classifier',)
for key, values in release.package_info.iterlists():
if key in multivalue:
initial[key] = values
else:
initial[key] = '\n'.join(values)
if request.method == 'POST':
form = form_class(data=request.POST, initial=initial)
if form.is_valid():
for key, value in form.cleaned_data.iteritems():
if isinstance(value, basestring):
release.package_info[key] = value
elif hasattr(value, '__iter__'):
release.package_info.setlist(key, list(value))
release.save()
return HttpResponseRedirect(release.get_absolute_url())
else:
form = form_class(initial=initial)
return render(request,
'pypi_packages/release_manage.html',
dict(release=release, form=form),
content_type = settings.DEFAULT_CONTENT_TYPE,
)
@user_maintains_package()
def manage_files(request, package_name, version):
release = _get_release(request, package_name, version)
formset_factory = inlineformset_factory(Release, Distribution, fields=('comment', ), extra=0)
if request.method == 'POST':
formset = formset_factory(data=request.POST,
files=request.FILES,
instance=release)
if formset.is_valid():
formset.save()
formset = formset_factory(instance=release)
else:
formset = formset_factory(instance=release)
return render(request,
'pypi_packages/release_manage_files.html',
dict(release=release, formset=formset, upload_form=DistributionUploadForm()),
content_type = settings.DEFAULT_CONTENT_TYPE,
)
@user_maintains_package()
def upload_file(request, package_name, version):
release = _get_release(request, package_name, version)
if request.method == 'POST':
form = DistributionUploadForm(data=request.POST, files=request.FILES)
if form.is_valid():
dist = form.save(commit=False)
dist.release = release
dist.uploader = request.user
dist.save()
return HttpResponseRedirect(reverse_lazy('djangopypi2-release-manage-files',
kwargs=dict(package_name=package_name, version=version)))
else:
form = DistributionUploadForm()
return render(request,
'pypi_packages/release_upload_file.html',
dict(release=release, form=form),
content_type = settings.DEFAULT_CONTENT_TYPE,
)
|
{
"content_hash": "2226d4d7a3cc08a024474e0353057e40",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 97,
"avg_line_length": 37.82608695652174,
"alnum_prop": 0.6772030651340997,
"repo_name": "popen2/djangopypi2",
"id": "d5f426523fb52c97d7c602656d055bcbf6b66fb1",
"size": "5220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangopypi2/apps/pypi_packages/release_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1294"
},
{
"name": "HTML",
"bytes": "32170"
},
{
"name": "Python",
"bytes": "150011"
}
],
"symlink_target": ""
}
|
""" tests all modules for syntax correctness and internal imports
"""
from glob import glob
from importlib import import_module
from inspect import (
getfile,
currentframe
)
from os import walk
from os.path import (
abspath,
dirname,
join,
split
)
from sys import path as syspath
SCRIPT_PATH = dirname(abspath(getfile(currentframe())))
PROJECT_ROOT = dirname(SCRIPT_PATH)
ROOT_PACKAGE_NAME = 'ReOBJ'
ROOT_PACKAGE_PATH = join(PROJECT_ROOT, ROOT_PACKAGE_NAME)
syspath.insert(0, PROJECT_ROOT)
def test_all_imports():
""" Tests: all modules for syntax correctness and internal imports
"""
print('::: TEST: test_all_imports()')
chars_to_cut = len(PROJECT_ROOT) + 1
full_modules_path = []
for root, dirnames, filenames in walk(ROOT_PACKAGE_PATH):
full_modules_path.extend(glob(root + '/*.py'))
for full_path in full_modules_path:
packagepath, modulepath = split(full_path)
package = packagepath[chars_to_cut:].replace('/', '.')
if '__' not in package: # skip packages with '__' e.g. __pycache__
module_full_name = '{}.{}'.format(package, modulepath[:-3])
import_module(module_full_name)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
if __name__ == '__main__':
test_all_imports()
|
{
"content_hash": "018898e97a009fcf676a3b60b6717599",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 74,
"avg_line_length": 28.152173913043477,
"alnum_prop": 0.6277992277992278,
"repo_name": "peter1000/ReOBJ",
"id": "f326d3a976dfd0b320618a19e2ccd1b523521d19",
"size": "1295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/TestImports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "217803"
}
],
"symlink_target": ""
}
|
from PIL import Image
class Resizer(object):
def __init__(self, expand=False, filter=Image.ANTIALIAS):
self.expand = expand
self.filter = filter
def transformations(self, size, target_size):
# should return a list of JSON-serializable commands
# that are processed with transform method
raise NotImplementedError
def transform(self, img, transformation, params):
# transformations MUST be idempotent
if transformation == 'crop':
return img.crop(params)
elif transformation == 'resize':
return img.resize(params, self.filter)
else:
raise NotImplementedError(transformation)
def get_target_size(self, size, target_size):
transforms = self.transformations(size, target_size)
for transformation, params in transforms:
if transformation == 'resize':
size = params
elif transformation == 'crop':
size = (params[2] - params[0], params[3] - params[1])
else:
raise NotImplementedError(transformation)
return size
def __call__(self, img, target_size):
transforms = self.transformations(img.size, target_size)
for transformation, params in transforms:
img = self.transform(img, transformation, params)
return img
class ResizeFit(Resizer):
def transformations(self, size, target_size):
sw, sh = size
tw, th = target_size
if not self.expand and sw<=tw and sh<=th:
return []
if sw*th>sh*tw:
h = sh*tw//sw
return [('resize', (tw, h))]
else:
w = sw*th//sh
return [('resize', (w, th))]
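# Sanity sketch with made-up sizes: fitting a 400x300 source into a 200x200 box
# keeps the aspect ratio, so ResizeFit().transformations((400, 300), (200, 200))
# returns [('resize', (200, 150))]; with the default expand=False a 100x80
# source already inside the box yields an empty command list.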
class ResizeCrop(Resizer):
def __init__(self, *args, **kwargs):
self.force = kwargs.pop('force', False)
Resizer.__init__(self, *args, **kwargs)
assert not (self.force and self.expand)
def transformations(self, size, target_size):
sw, sh = size
tw, th = target_size
if not self.expand and not self.force and sw<=tw and sh<=th:
return []
if self.force and (sw<=tw or sh<=th):
if sw*th>sh*tw:
# crop right and left side
tw, th = sh*tw//th, sh
else:
# crop upper and bottom side
tw, th = sw, sw*th//tw
transforms = []
if sw*th>sh*tw:
# crop right and left side
if sh!=th and (sh>th or self.expand):
w = sw*th//sh
transforms.append(('resize', (w, th)))
sw, sh = w, th
if sw>tw:
wd = (sw-tw)//2
transforms.append(('crop', (wd, 0, tw+wd, sh)))
else:
# crop upper and bottom side
if sw!=tw and (sw>tw or self.expand):
h = sh*tw//sw
transforms.append(('resize', (tw, h)))
sw, sh = tw, h
if sh>th:
hd = (sh-th)//2
transforms.append(('crop', (0, hd, sw, th+hd)))
return transforms
class ResizeMixed(Resizer):
def __init__(self, hor_resize, vert_resize, rate=1):
self.hor_resize = hor_resize
self.vert_resize = vert_resize
self.rate = rate
def get_resizer(self, size, target_size):
sw, sh = size
if sw >= sh * self.rate:
return self.hor_resize
else:
return self.vert_resize
def transformations(self, size, target_size):
return self.get_resizer(size, target_size)\
.transformations(size, target_size)
#def transform(self, img, *args):
# XXX is this method needed?
# return self.get_resizer(img).transform(img, *args)
def __call__(self, img, target_size):
return self.get_resizer(img.size, target_size)(img, target_size)
class ResizeFixedWidth(Resizer):
def transformations(self, size, target_size):
sw, sh = size
tw, th = target_size
if not self.expand and sw<=tw:
return []
h = sh*tw//sw
return [('resize', (tw, h))]
class ResizeFixedHeight(Resizer):
def transformations(self, size, target_size):
sw, sh = size
tw, th = target_size
if not self.expand and sh<=th:
return []
w = sw*th//sh
return [('resize', (w, th))]
|
{
"content_hash": "ac24604b1fce8b12e3a13dbccf8e8c25",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 72,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.5389100695223145,
"repo_name": "Lehych/iktomi",
"id": "8b8816e77a9998e0d60d050e1905a10ec5bbe13b",
"size": "4459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iktomi/unstable/utils/image_resizers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7846"
},
{
"name": "Python",
"bytes": "510161"
},
{
"name": "Shell",
"bytes": "1343"
}
],
"symlink_target": ""
}
|
""" MiniMark GC.
Environment variables can be used to fine-tune the following parameters:
PYPY_GC_NURSERY The nursery size. Defaults to 1/2 of your cache or
'4M'. Small values
(like 1 or 1KB) are useful for debugging.
PYPY_GC_NURSERY_CLEANUP The interval at which the nursery is cleaned up. Must
be smaller than the nursery size and bigger than the
biggest object we can allocate in the nursery.
PYPY_GC_MAJOR_COLLECT Major collection memory factor. Default is '1.82',
which means trigger a major collection when the
memory consumed equals 1.82 times the memory
really used at the end of the previous major
collection.
PYPY_GC_GROWTH Major collection threshold's max growth rate.
Default is '1.4'. Useful to collect more often
than normally on sudden memory growth, e.g. when
there is a temporary peak in memory usage.
PYPY_GC_MAX The max heap size. If coming near this limit, it
will first collect more often, then raise an
RPython MemoryError, and if that is not enough,
crash the program with a fatal error. Try values
like '1.6GB'.
PYPY_GC_MAX_DELTA The major collection threshold will never be set
to more than PYPY_GC_MAX_DELTA the amount really
used after a collection. Defaults to 1/8th of the
total RAM size (which is constrained to be at most
2/3/4GB on 32-bit systems). Try values like '200MB'.
PYPY_GC_MIN Don't collect while the memory size is below this
limit. Useful to avoid spending all the time in
the GC in very small programs. Defaults to 8
times the nursery.
PYPY_GC_DEBUG Enable extra checks around collections that are
too slow for normal use. Values are 0 (off),
1 (on major collections) or 2 (also on minor
collections).
"""
# XXX Should find a way to bound the major collection threshold by the
# XXX total addressable size. Maybe by keeping some minimarkpage arenas
# XXX pre-reserved, enough for a few nursery collections? What about
# XXX raw-malloced memory?
import sys
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena, llgroup
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.lltypesystem.llmemory import raw_malloc_usage
from rpython.memory.gc.base import GCBase, MovingGCBase
from rpython.memory.gc import env
from rpython.memory.support import mangle_hash
from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint
from rpython.rlib.rarithmetic import LONG_BIT_SHIFT
from rpython.rlib.debug import ll_assert, debug_print, debug_start, debug_stop
from rpython.rlib.objectmodel import specialize
#
# Handles the objects in 2 generations:
#
# * young objects: allocated in the nursery if they are not too large, or
# raw-malloced otherwise. The nursery is a fixed-size memory buffer of
# 4MB by default. When full, we do a minor collection;
# the surviving objects from the nursery are moved outside, and the
# non-surviving raw-malloced objects are freed. All surviving objects
# become old.
#
# * old objects: never move again. These objects are either allocated by
# minimarkpage.py (if they are small), or raw-malloced (if they are not
# small). Collected by regular mark-n-sweep during major collections.
#
WORD = LONG_BIT // 8
NULL = llmemory.NULL
first_gcflag = 1 << (LONG_BIT//2)
# The following flag is set on objects if we need to do something to
# track the young pointers that it might contain. The flag is not set
# on young objects (unless they are large arrays, see below), and we
# simply assume that any young object can point to any other young object.
# For old and prebuilt objects, the flag is usually set, and is cleared
# when we write a young pointer to it. For large arrays with
# GCFLAG_HAS_CARDS, we rely on card marking to track where the
# young pointers are; the flag GCFLAG_TRACK_YOUNG_PTRS is set in this
# case too, to speed up the write barrier.
GCFLAG_TRACK_YOUNG_PTRS = first_gcflag << 0
# The following flag is set on some prebuilt objects. The flag is set
# unless the object is already listed in 'prebuilt_root_objects'.
# When a pointer is written inside an object with GCFLAG_NO_HEAP_PTRS
# set, the write_barrier clears the flag and adds the object to
# 'prebuilt_root_objects'.
GCFLAG_NO_HEAP_PTRS = first_gcflag << 1
# The following flag is set on surviving objects during a major collection,
# and on surviving raw-malloced young objects during a minor collection.
GCFLAG_VISITED = first_gcflag << 2
# The following flag is set on nursery objects of which we asked the id
# or the identityhash. It means that a space of the size of the object
# has already been allocated in the nonmovable part.
GCFLAG_HAS_SHADOW = first_gcflag << 3
# The following flag is set temporarily on some objects during a major
# collection. See pypy/doc/discussion/finalizer-order.txt
GCFLAG_FINALIZATION_ORDERING = first_gcflag << 4
# This flag is reserved for RPython.
GCFLAG_EXTRA = first_gcflag << 5
# The following flag is set on externally raw_malloc'ed arrays of pointers.
# They are allocated with some extra space in front of them for a bitfield,
# one bit per 'card_page_indices' indices.
GCFLAG_HAS_CARDS = first_gcflag << 6
GCFLAG_CARDS_SET = first_gcflag << 7 # <- at least one card bit is set
# note that GCFLAG_CARDS_SET is the most significant bit of a byte:
# this is required for the JIT (x86)
_GCFLAG_FIRST_UNUSED = first_gcflag << 8 # the first unused bit
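# Illustrative sketch of the 'tid' layout implied by the flags above:
# on a 64-bit build LONG_BIT is 64, so first_gcflag is 1 << 32, the
# type id occupies the low half of the header word and the GCFLAG_xxx
# bits the high half. Plain ints below stand in for the real
# llgroup/llop machinery.
demo_first_gcflag = 1 << 32
demo_visited = demo_first_gcflag << 2          # same shift as GCFLAG_VISITED
demo_tid = 1234 | demo_visited                 # typeid 1234 plus one flag
assert demo_tid & (demo_first_gcflag - 1) == 1234   # low half: the typeid
assert demo_tid & demo_visited != 0                 # high half: the flag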
FORWARDSTUB = lltype.GcStruct('forwarding_stub',
('forw', llmemory.Address))
FORWARDSTUBPTR = lltype.Ptr(FORWARDSTUB)
NURSARRAY = lltype.Array(llmemory.Address)
# ____________________________________________________________
class MiniMarkGC(MovingGCBase):
_alloc_flavor_ = "raw"
inline_simple_malloc = True
inline_simple_malloc_varsize = True
needs_write_barrier = True
prebuilt_gc_objects_are_static_roots = False
malloc_zero_filled = True # xxx experiment with False
gcflag_extra = GCFLAG_EXTRA
# All objects start with a HDR, i.e. with a field 'tid' which contains
# a word. This word is divided in two halves: the lower half contains
# the typeid, and the upper half contains various flags, as defined
# by GCFLAG_xxx above.
HDR = lltype.Struct('header', ('tid', lltype.Signed))
typeid_is_in_field = 'tid'
_ADDRARRAY = lltype.Array(llmemory.Address, hints={'nolength': True})
# During a minor collection, the objects in the nursery that are
# moved outside are changed in-place: their header is replaced with
# the value -42, and the following word is set to the address of
# where the object was moved. This means that all objects in the
# nursery need to be at least 2 words long, but objects outside the
# nursery don't need to.
minimal_size_in_nursery = (
llmemory.sizeof(HDR) + llmemory.sizeof(llmemory.Address))
TRANSLATION_PARAMS = {
# Automatically adjust the size of the nursery and the
# 'major_collection_threshold' from the environment.
# See docstring at the start of the file.
"read_from_env": True,
# The size of the nursery. Note that this is only used as a
# fall-back number.
"nursery_size": 896*1024,
# The system page size. Like malloc, we assume that it is 4K
# for 32-bit systems; unlike malloc, we assume that it is 8K
# for 64-bit systems, for consistent results.
"page_size": 1024*WORD,
# The size of an arena. Arenas are groups of pages allocated
# together.
"arena_size": 65536*WORD,
# The maximum size of an object allocated compactly. All objects
# that are larger are just allocated with raw_malloc(). Note that
# the size limit for being first allocated in the nursery is much
# larger; see below.
"small_request_threshold": 35*WORD,
# Full collection threshold: after a major collection, we record
# the total size consumed; and after every minor collection, if the
# total size is now more than 'major_collection_threshold' times,
# we trigger the next major collection.
"major_collection_threshold": 1.82,
# Threshold to avoid that the total heap size grows by a factor of
# major_collection_threshold at every collection: it can only
# grow at most by the following factor from one collection to the
# next. Used e.g. when there is a sudden, temporary peak in memory
# usage; this avoids that the upper bound grows too fast.
"growth_rate_max": 1.4,
# The number of array indices that are mapped to a single bit in
# write_barrier_from_array(). Must be a power of two. The default
# value of 128 means that card pages are 512 bytes (1024 on 64-bits)
# in regular arrays of pointers; more in arrays whose items are
# larger. A value of 0 disables card marking.
"card_page_indices": 128,
# Objects whose total size is at least 'large_object' bytes are
# allocated out of the nursery immediately, as old objects. The
# minimal allocated size of the nursery is 2x the following
# number (by default, at least 132KB on 32-bit and 264KB on 64-bit).
"large_object": (16384+512)*WORD,
# This is the chunk that we cleanup in the nursery. The point is
# to avoid having to trash all the caches just to zero the nursery,
# so we trade it by cleaning it bit-by-bit, as we progress through
# nursery. Has to fit at least one large object
"nursery_cleanup": 32768 * WORD,
}
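# Quick arithmetic check (plain ints, both word sizes) of the comments
# in TRANSLATION_PARAMS above: 128 card indices cover 128 pointers,
# i.e. 512 bytes on 32-bit and 1024 bytes on 64-bit, and the minimal
# nursery of 2 * large_object is 132KB on 32-bit and 264KB on 64-bit.
for word in (4, 8):
    assert 128 * word == {4: 512, 8: 1024}[word]
    assert 2 * (16384 + 512) * word == {4: 132, 8: 264}[word] * 1024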
def __init__(self, config,
read_from_env=False,
nursery_size=32*WORD,
nursery_cleanup=9*WORD,
page_size=16*WORD,
arena_size=64*WORD,
small_request_threshold=5*WORD,
major_collection_threshold=2.5,
growth_rate_max=2.5, # for tests
card_page_indices=0,
large_object=8*WORD,
ArenaCollectionClass=None,
**kwds):
MovingGCBase.__init__(self, config, **kwds)
assert small_request_threshold % WORD == 0
self.read_from_env = read_from_env
self.nursery_size = nursery_size
self.nursery_cleanup = nursery_cleanup
self.small_request_threshold = small_request_threshold
self.major_collection_threshold = major_collection_threshold
self.growth_rate_max = growth_rate_max
self.num_major_collects = 0
self.min_heap_size = 0.0
self.max_heap_size = 0.0
self.max_heap_size_already_raised = False
self.max_delta = float(r_uint(-1))
#
self.card_page_indices = card_page_indices
if self.card_page_indices > 0:
self.card_page_shift = 0
while (1 << self.card_page_shift) < self.card_page_indices:
self.card_page_shift += 1
#
# 'large_object' limits how big objects can be in the nursery, so
# it gives a lower bound on the allowed size of the nursery.
self.nonlarge_max = large_object - 1
#
self.nursery = NULL
self.nursery_free = NULL
self.nursery_top = NULL
self.nursery_real_top = NULL
self.debug_tiny_nursery = -1
self.debug_rotating_nurseries = lltype.nullptr(NURSARRAY)
self.extra_threshold = 0
#
# The ArenaCollection() handles the nonmovable objects allocation.
if ArenaCollectionClass is None:
from rpython.memory.gc import minimarkpage
ArenaCollectionClass = minimarkpage.ArenaCollection
self.ac = ArenaCollectionClass(arena_size, page_size,
small_request_threshold)
#
# Used by minor collection: a list of (mostly non-young) objects that
# (may) contain a pointer to a young object. Populated by
# the write barrier: when we clear GCFLAG_TRACK_YOUNG_PTRS, we
# add it to this list.
# Note that young array objects may (by temporary "mistake") be added
# to this list, but will be removed again at the start of the next
# minor collection.
self.old_objects_pointing_to_young = self.AddressStack()
#
# Similar to 'old_objects_pointing_to_young', but lists objects
# that have the GCFLAG_CARDS_SET bit. For large arrays. Note
# that it is possible for an object to be listed both in here
# and in 'old_objects_pointing_to_young', in which case we
# should just clear the cards and trace it fully, as usual.
# Note also that young array objects are never listed here.
self.old_objects_with_cards_set = self.AddressStack()
#
# A list of all prebuilt GC objects that contain pointers to the heap
self.prebuilt_root_objects = self.AddressStack()
#
self._init_writebarrier_logic()
def setup(self):
"""Called at run-time to initialize the GC."""
#
# Hack: MovingGCBase.setup() sets up stuff related to id(), which
# we implement differently anyway. So directly call GCBase.setup().
GCBase.setup(self)
#
# Two lists of all raw_malloced objects (the objects too large)
self.young_rawmalloced_objects = self.null_address_dict()
self.old_rawmalloced_objects = self.AddressStack()
self.rawmalloced_total_size = r_uint(0)
#
# Two lists of all objects with finalizers. Actually they are lists
# of pairs (finalization_queue_nr, object). "probably young objects"
# are all traced and moved to the "old" list by the next minor
# collection.
self.probably_young_objects_with_finalizers = self.AddressDeque()
self.old_objects_with_finalizers = self.AddressDeque()
p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw',
track_allocation=False)
self.singleaddr = llmemory.cast_ptr_to_adr(p)
#
# Two lists of all objects with destructors.
self.young_objects_with_destructors = self.AddressStack()
self.old_objects_with_destructors = self.AddressStack()
#
# Two lists of the objects with weakrefs. No weakref can be an
# old object weakly pointing to a young object: indeed, weakrefs
# are immutable so they cannot point to an object that was
# created after the weakref itself.
self.young_objects_with_weakrefs = self.AddressStack()
self.old_objects_with_weakrefs = self.AddressStack()
#
# Support for id and identityhash: map nursery objects with
# GCFLAG_HAS_SHADOW to their future location at the next
# minor collection.
self.nursery_objects_shadows = self.AddressDict()
#
# Allocate a nursery. In case of auto_nursery_size, start by
# allocating a very small nursery, enough to do things like look
# up the env var, which requires the GC; and then really
# allocate the nursery of the final size.
if not self.read_from_env:
self.allocate_nursery()
else:
#
defaultsize = self.nursery_size
minsize = 2 * (self.nonlarge_max + 1)
self.nursery_size = minsize
self.allocate_nursery()
#
# From there on, the GC is fully initialized and the code
# below can use it
newsize = env.read_from_env('PYPY_GC_NURSERY')
# PYPY_GC_NURSERY=smallvalue means that minor collects occur
# very frequently; the extreme case is PYPY_GC_NURSERY=1, which
# forces a minor collect for every malloc. Useful to debug
# external factors, like trackgcroot or the handling of the write
# barrier. Implemented by still using 'minsize' for the nursery
# size (needed to handle mallocs just below 'large_objects') but
# hacking at the current nursery position in collect_and_reserve().
if newsize <= 0:
newsize = env.estimate_best_nursery_size()
if newsize <= 0:
newsize = defaultsize
if newsize < minsize:
self.debug_tiny_nursery = newsize & ~(WORD-1)
newsize = minsize
nurs_cleanup = env.read_from_env('PYPY_GC_NURSERY_CLEANUP')
if nurs_cleanup > 0:
self.nursery_cleanup = nurs_cleanup
#
major_coll = env.read_float_from_env('PYPY_GC_MAJOR_COLLECT')
if major_coll > 1.0:
self.major_collection_threshold = major_coll
#
growth = env.read_float_from_env('PYPY_GC_GROWTH')
if growth > 1.0:
self.growth_rate_max = growth
#
min_heap_size = env.read_uint_from_env('PYPY_GC_MIN')
if min_heap_size > 0:
self.min_heap_size = float(min_heap_size)
else:
# defaults to 8 times the nursery
self.min_heap_size = newsize * 8
#
max_heap_size = env.read_uint_from_env('PYPY_GC_MAX')
if max_heap_size > 0:
self.max_heap_size = float(max_heap_size)
#
max_delta = env.read_uint_from_env('PYPY_GC_MAX_DELTA')
if max_delta > 0:
self.max_delta = float(max_delta)
else:
self.max_delta = 0.125 * env.get_total_memory()
#
self.minor_collection() # to empty the nursery
llarena.arena_free(self.nursery)
self.nursery_size = newsize
self.allocate_nursery()
#
if self.nursery_cleanup < self.nonlarge_max + 1:
self.nursery_cleanup = self.nonlarge_max + 1
# We need exactly initial_cleanup + N*nursery_cleanup = nursery_size.
# We choose the value of initial_cleanup to be between 1x and 2x the
# value of nursery_cleanup.
self.initial_cleanup = self.nursery_cleanup + (
self.nursery_size % self.nursery_cleanup)
if (r_uint(self.initial_cleanup) > r_uint(self.nursery_size) or
self.debug_tiny_nursery >= 0):
self.initial_cleanup = self.nursery_size
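# Standalone check of the invariant established just above:
# initial_cleanup = nursery_cleanup + nursery_size % nursery_cleanup
# implies initial_cleanup + N * nursery_cleanup == nursery_size for
# some integer N >= 0, with the value of initial_cleanup between 1x
# and 2x nursery_cleanup. The sizes below are made up for the check.
for size, cleanup in [(896 * 1024, 32768 * 8), (1000, 96)]:
    initial = cleanup + size % cleanup
    assert (size - initial) % cleanup == 0
    assert cleanup <= initial < 2 * cleanup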
def _nursery_memory_size(self):
extra = self.nonlarge_max + 1
return self.nursery_size + extra
def _alloc_nursery(self):
# the start of the nursery: we actually allocate a bit more for
# the nursery than really needed, to simplify pointer arithmetic
# in malloc_fixedsize_clear(). The few extra pages are never used
# anyway so it doesn't even count.
nursery = llarena.arena_malloc(self._nursery_memory_size(), 2)
if not nursery:
raise MemoryError("cannot allocate nursery")
return nursery
def allocate_nursery(self):
debug_start("gc-set-nursery-size")
debug_print("nursery size:", self.nursery_size)
self.nursery = self._alloc_nursery()
# the current position in the nursery:
self.nursery_free = self.nursery
# the end of the nursery:
self.nursery_top = self.nursery + self.nursery_size
self.nursery_real_top = self.nursery_top
# initialize the threshold
self.min_heap_size = max(self.min_heap_size, self.nursery_size *
self.major_collection_threshold)
# the following two values are usually equal, but during raw mallocs
# of arrays, next_major_collection_threshold is decremented to make
# the next major collection arrive earlier.
# See translator/c/test/test_newgc, test_nongc_attached_to_gc
self.next_major_collection_initial = self.min_heap_size
self.next_major_collection_threshold = self.min_heap_size
self.set_major_threshold_from(0.0)
ll_assert(self.extra_threshold == 0, "extra_threshold set too early")
self.initial_cleanup = self.nursery_size
debug_stop("gc-set-nursery-size")
def set_major_threshold_from(self, threshold, reserving_size=0):
# Set the next_major_collection_threshold.
threshold_max = (self.next_major_collection_initial *
self.growth_rate_max)
if threshold > threshold_max:
threshold = threshold_max
#
threshold += reserving_size
if threshold < self.min_heap_size:
threshold = self.min_heap_size
#
if self.max_heap_size > 0.0 and threshold > self.max_heap_size:
threshold = self.max_heap_size
bounded = True
else:
bounded = False
#
self.next_major_collection_initial = threshold
self.next_major_collection_threshold = threshold
return bounded
def post_setup(self):
# set up extra stuff for PYPY_GC_DEBUG.
MovingGCBase.post_setup(self)
if self.DEBUG and llarena.has_protect:
# gc debug mode: allocate 23 nurseries instead of just 1,
# and use them alternatively, while mprotect()ing the unused
# ones to detect invalid access.
debug_start("gc-debug")
self.debug_rotating_nurseries = lltype.malloc(
NURSARRAY, 22, flavor='raw', track_allocation=False)
i = 0
while i < 22:
nurs = self._alloc_nursery()
llarena.arena_protect(nurs, self._nursery_memory_size(), True)
self.debug_rotating_nurseries[i] = nurs
i += 1
debug_print("allocated", len(self.debug_rotating_nurseries),
"extra nurseries")
debug_stop("gc-debug")
def debug_rotate_nursery(self):
if self.debug_rotating_nurseries:
debug_start("gc-debug")
oldnurs = self.nursery
llarena.arena_protect(oldnurs, self._nursery_memory_size(), True)
#
newnurs = self.debug_rotating_nurseries[0]
i = 0
while i < len(self.debug_rotating_nurseries) - 1:
self.debug_rotating_nurseries[i] = (
self.debug_rotating_nurseries[i + 1])
i += 1
self.debug_rotating_nurseries[i] = oldnurs
#
llarena.arena_protect(newnurs, self._nursery_memory_size(), False)
self.nursery = newnurs
self.nursery_top = self.nursery + self.initial_cleanup
self.nursery_real_top = self.nursery + self.nursery_size
debug_print("switching from nursery", oldnurs,
"to nursery", self.nursery,
"size", self.nursery_size)
debug_stop("gc-debug")
def malloc_fixedsize_clear(self, typeid, size,
needs_finalizer=False,
is_finalizer_light=False,
contains_weakptr=False):
size_gc_header = self.gcheaderbuilder.size_gc_header
totalsize = size_gc_header + size
rawtotalsize = raw_malloc_usage(totalsize)
#
# If the object needs a finalizer, ask for a rawmalloc.
# The following check should be constant-folded.
if needs_finalizer and not is_finalizer_light:
# old-style finalizers only!
ll_assert(not contains_weakptr,
"'needs_finalizer' and 'contains_weakptr' both specified")
obj = self.external_malloc(typeid, 0, alloc_young=False)
res = llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
self.register_finalizer(-1, res)
return res
#
# If totalsize is greater than nonlarge_max (which should never be
# the case in practice), ask for a rawmalloc. The following check
# should be constant-folded.
if rawtotalsize > self.nonlarge_max:
ll_assert(not contains_weakptr,
"'contains_weakptr' specified for a large object")
obj = self.external_malloc(typeid, 0, alloc_young=True)
#
else:
# If totalsize is smaller than minimal_size_in_nursery, round it
# up. The following check should also be constant-folded.
min_size = raw_malloc_usage(self.minimal_size_in_nursery)
if rawtotalsize < min_size:
totalsize = rawtotalsize = min_size
#
# Get the memory from the nursery. If there is not enough space
# there, do a collect first.
result = self.nursery_free
self.nursery_free = result + totalsize
if self.nursery_free > self.nursery_top:
result = self.collect_and_reserve(result, totalsize)
#
# Build the object.
llarena.arena_reserve(result, totalsize)
obj = result + size_gc_header
self.init_gc_object(result, typeid, flags=0)
#
# If it is a weakref or has a lightweight destructor, record it
# (checks constant-folded).
if needs_finalizer:
self.young_objects_with_destructors.append(obj)
if contains_weakptr:
self.young_objects_with_weakrefs.append(obj)
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
def malloc_varsize_clear(self, typeid, length, size, itemsize,
offset_to_length):
size_gc_header = self.gcheaderbuilder.size_gc_header
nonvarsize = size_gc_header + size
#
# Compute the maximal length that makes the object still
# below 'nonlarge_max'. All the following logic is usually
# constant-folded because self.nonlarge_max, size and itemsize
# are all constants (the arguments are constant due to
# inlining).
maxsize = self.nonlarge_max - raw_malloc_usage(nonvarsize)
if maxsize < 0:
toobig = r_uint(0) # the nonvarsize alone is too big
elif raw_malloc_usage(itemsize):
toobig = r_uint(maxsize // raw_malloc_usage(itemsize)) + 1
else:
toobig = r_uint(sys.maxint) + 1
if r_uint(length) >= r_uint(toobig):
#
# If the total size of the object would be larger than
# 'nonlarge_max', then allocate it externally. We also
# go there if 'length' is actually negative.
obj = self.external_malloc(typeid, length, alloc_young=True)
#
else:
# With the above checks we know now that totalsize cannot be more
# than 'nonlarge_max'; in particular, the + and * cannot overflow.
totalsize = nonvarsize + itemsize * length
totalsize = llarena.round_up_for_allocation(totalsize)
#
# 'totalsize' should contain at least the GC header and
# the length word, so it should never be smaller than
# 'minimal_size_in_nursery'
ll_assert(raw_malloc_usage(totalsize) >=
raw_malloc_usage(self.minimal_size_in_nursery),
"malloc_varsize_clear(): totalsize < minimalsize")
#
# Get the memory from the nursery. If there is not enough space
# there, do a collect first.
result = self.nursery_free
self.nursery_free = result + totalsize
if self.nursery_free > self.nursery_top:
result = self.collect_and_reserve(result, totalsize)
#
# Build the object.
llarena.arena_reserve(result, totalsize)
self.init_gc_object(result, typeid, flags=0)
#
# Set the length and return the object.
obj = result + size_gc_header
(obj + offset_to_length).signed[0] = length
#
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
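# Standalone sketch of the 'toobig' bound computed above, with
# hypothetical sizes (nonlarge_max=511, a 16-byte fixed part, 8-byte
# items): the largest length that still fits is exactly toobig - 1,
# and the unsigned comparison also rejects negative lengths, which
# wrap around to huge values.
max_, fixed, item = 511, 16, 8
toobig = (max_ - fixed) // item + 1
assert fixed + item * (toobig - 1) <= max_ < fixed + item * toobig
assert (-1) % (1 << 64) >= toobig              # r_uint(-1) is a huge value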
def malloc_fixed_or_varsize_nonmovable(self, typeid, length):
# length==0 for fixedsize
obj = self.external_malloc(typeid, length, alloc_young=True)
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
def collect(self, gen=1):
"""Do a minor (gen=0) or major (gen>0) collection."""
self.minor_collection()
if gen > 0:
self.major_collection()
def move_nursery_top(self, totalsize):
size = self.nursery_cleanup
ll_assert(self.nursery_real_top - self.nursery_top >= size,
"nursery_cleanup not a divisor of nursery_size - initial_cleanup")
ll_assert(llmemory.raw_malloc_usage(totalsize) <= size,
"totalsize > nursery_cleanup")
llarena.arena_reset(self.nursery_top, size, 2)
self.nursery_top += size
move_nursery_top._always_inline_ = True
def collect_and_reserve(self, prev_result, totalsize):
"""To call when nursery_free overflows nursery_top.
First check whether nursery_top is the real top; if not, we can
just move the top up by one cleanup step and continue.
Otherwise, do a minor collection, and possibly also a major
collection, and finally reserve 'totalsize' bytes at the start
of the now-empty nursery.
"""
if self.nursery_top < self.nursery_real_top:
self.move_nursery_top(totalsize)
return prev_result
self.minor_collection()
#
if self.get_total_memory_used() > self.next_major_collection_threshold:
self.major_collection()
#
# The nursery might not be empty now, because of
# execute_finalizers(). If it is almost full again,
# we need to fix it with another call to minor_collection().
if self.nursery_free + totalsize > self.nursery_top:
#
if self.nursery_free + totalsize > self.nursery_real_top:
self.minor_collection()
# then the nursery is empty
else:
# we just need to clean up a bit more of the nursery
self.move_nursery_top(totalsize)
#
result = self.nursery_free
self.nursery_free = result + totalsize
ll_assert(self.nursery_free <= self.nursery_top, "nursery overflow")
#
if self.debug_tiny_nursery >= 0: # for debugging
if self.nursery_top - self.nursery_free > self.debug_tiny_nursery:
self.nursery_free = self.nursery_top - self.debug_tiny_nursery
#
return result
collect_and_reserve._dont_inline_ = True
# XXX kill alloc_young and make it always True
def external_malloc(self, typeid, length, alloc_young):
"""Allocate a large object using the ArenaCollection or
raw_malloc(), possibly as an object with card marking enabled,
if it has gc pointers in its var-sized part. 'length' should be
specified as 0 if the object is not varsized. The returned
object is fully initialized and zero-filled."""
#
# Here we really need a valid 'typeid', not 0 (as the JIT might
# try to send us if there is still a bug).
ll_assert(bool(self.combine(typeid, 0)),
"external_malloc: typeid == 0")
#
# Compute the total size, carefully checking for overflows.
size_gc_header = self.gcheaderbuilder.size_gc_header
nonvarsize = size_gc_header + self.fixed_size(typeid)
if length == 0:
# this includes the case of fixed-size objects, for which we
# should not even ask for the varsize_item_sizes().
totalsize = nonvarsize
elif length > 0:
# var-sized allocation with at least one item
itemsize = self.varsize_item_sizes(typeid)
try:
varsize = ovfcheck(itemsize * length)
totalsize = ovfcheck(nonvarsize + varsize)
except OverflowError:
raise MemoryError
else:
# negative length! This likely comes from an overflow
# earlier. We will just raise MemoryError here.
raise MemoryError
#
# If somebody calls this function a lot, we must eventually
# force a full collection.
if (float(self.get_total_memory_used()) + raw_malloc_usage(totalsize) >
self.next_major_collection_threshold):
self.minor_collection()
self.major_collection(raw_malloc_usage(totalsize))
#
# Check if the object would fit in the ArenaCollection.
# Also, an object allocated from ArenaCollection must be old.
if (raw_malloc_usage(totalsize) <= self.small_request_threshold
and not alloc_young):
#
# Yes. Round up 'totalsize' (it cannot overflow and it
# must remain <= self.small_request_threshold.)
totalsize = llarena.round_up_for_allocation(totalsize)
ll_assert(raw_malloc_usage(totalsize) <=
self.small_request_threshold,
"rounding up made totalsize > small_request_threshold")
#
# Allocate from the ArenaCollection and clear the memory returned.
result = self.ac.malloc(totalsize)
llmemory.raw_memclear(result, totalsize)
#
extra_flags = GCFLAG_TRACK_YOUNG_PTRS
#
else:
# No, so proceed to allocate it externally with raw_malloc().
# Check if we need to introduce the card marker bits area.
if (self.card_page_indices <= 0 # <- this check is constant-folded
or not self.has_gcptr_in_varsize(typeid) or
raw_malloc_usage(totalsize) <= self.nonlarge_max):
#
# In these cases, we don't want a card marker bits area.
# This case also includes all fixed-size objects.
cardheadersize = 0
extra_flags = 0
#
else:
# Reserve N extra words containing card bits before the object.
extra_words = self.card_marking_words_for_length(length)
cardheadersize = WORD * extra_words
extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS
# if 'alloc_young', then we also immediately set
# GCFLAG_CARDS_SET, but without adding the object to
# 'old_objects_with_cards_set'. In this way it should
# never be added to that list as long as it is young.
if alloc_young:
extra_flags |= GCFLAG_CARDS_SET
#
# Detect very rare cases of overflows
if raw_malloc_usage(totalsize) > (sys.maxint - (WORD-1)
- cardheadersize):
raise MemoryError("rare case of overflow")
#
# Now we know that the following computations cannot overflow.
# Note that round_up_for_allocation() is also needed to get the
# correct number added to 'rawmalloced_total_size'.
allocsize = (cardheadersize + raw_malloc_usage(
llarena.round_up_for_allocation(totalsize)))
#
# Allocate the object using arena_malloc(), which we assume here
# is just the same as raw_malloc(), but allows the extra
# flexibility of saying that we have extra words in the header.
# The memory returned is cleared by a raw_memclear().
arena = llarena.arena_malloc(allocsize, 2)
if not arena:
raise MemoryError("cannot allocate large object")
#
# Reserve the card mark bits as a list of single bytes
# (the loop is empty in C).
i = 0
while i < cardheadersize:
llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char))
i += 1
#
# Reserve the actual object. (This is also a no-op in C).
result = arena + cardheadersize
llarena.arena_reserve(result, totalsize)
#
# Record the newly allocated object and its full malloced size.
# The object is young or old depending on the argument.
self.rawmalloced_total_size += r_uint(allocsize)
if alloc_young:
if not self.young_rawmalloced_objects:
self.young_rawmalloced_objects = self.AddressDict()
self.young_rawmalloced_objects.add(result + size_gc_header)
else:
self.old_rawmalloced_objects.append(result + size_gc_header)
extra_flags |= GCFLAG_TRACK_YOUNG_PTRS
#
# Common code to fill the header and length of the object.
self.init_gc_object(result, typeid, extra_flags)
if self.is_varsize(typeid):
offset_to_length = self.varsize_offset_to_length(typeid)
(result + size_gc_header + offset_to_length).signed[0] = length
return result + size_gc_header
# ----------
# Other functions in the GC API
def set_max_heap_size(self, size):
self.max_heap_size = float(size)
if self.max_heap_size > 0.0:
if self.max_heap_size < self.next_major_collection_initial:
self.next_major_collection_initial = self.max_heap_size
if self.max_heap_size < self.next_major_collection_threshold:
self.next_major_collection_threshold = self.max_heap_size
def raw_malloc_memory_pressure(self, sizehint):
self.next_major_collection_threshold -= sizehint
if self.next_major_collection_threshold < 0:
# cannot trigger a full collection now, but we can ensure
# that one will occur very soon
self.nursery_top = self.nursery_real_top
self.nursery_free = self.nursery_real_top
def can_optimize_clean_setarrayitems(self):
if self.card_page_indices > 0:
return False
return MovingGCBase.can_optimize_clean_setarrayitems(self)
def can_move(self, obj):
"""Overrides the parent can_move()."""
return self.is_in_nursery(obj)
def shrink_array(self, obj, smallerlength):
#
# Only objects in the nursery can be "resized". Resizing them
# means recording that they have a smaller size, so that when
# moved out of the nursery, they will consume less memory.
# In particular, an array with GCFLAG_HAS_CARDS is never resized.
# Also, a nursery object with GCFLAG_HAS_SHADOW is not resized
# either, as this would potentially lose part of the memory in
# the already-allocated shadow.
if not self.is_in_nursery(obj):
return False
if self.header(obj).tid & GCFLAG_HAS_SHADOW:
return False
#
size_gc_header = self.gcheaderbuilder.size_gc_header
typeid = self.get_type_id(obj)
totalsmallersize = (
size_gc_header + self.fixed_size(typeid) +
self.varsize_item_sizes(typeid) * smallerlength)
llarena.arena_shrink_obj(obj - size_gc_header, totalsmallersize)
#
offset_to_length = self.varsize_offset_to_length(typeid)
(obj + offset_to_length).signed[0] = smallerlength
return True
# ----------
# Simple helpers
def get_type_id(self, obj):
tid = self.header(obj).tid
return llop.extract_ushort(llgroup.HALFWORD, tid)
def combine(self, typeid16, flags):
return llop.combine_ushort(lltype.Signed, typeid16, flags)
def init_gc_object(self, addr, typeid16, flags=0):
# The default 'flags' is zero. The flags GCFLAG_NO_xxx_PTRS
# have been chosen to allow 'flags' to be zero in the common
# case (hence the 'NO' in their name).
hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
hdr.tid = self.combine(typeid16, flags)
def init_gc_object_immortal(self, addr, typeid16, flags=0):
# For prebuilt GC objects, the flags must contain
# GCFLAG_NO_xxx_PTRS, at least initially.
flags |= GCFLAG_NO_HEAP_PTRS | GCFLAG_TRACK_YOUNG_PTRS
self.init_gc_object(addr, typeid16, flags)
def is_in_nursery(self, addr):
ll_assert(llmemory.cast_adr_to_int(addr) & 1 == 0,
"odd-valued (i.e. tagged) pointer unexpected here")
return self.nursery <= addr < self.nursery_real_top
def appears_to_be_young(self, addr):
# "is a valid addr to a young object?"
# but it's ok to occasionally return True accidentally.
# Maybe the best implementation would be a bloom filter
# of some kind instead of the dictionary lookup that is
# sometimes done below. But the expected common answer
# is "Yes" because addr points to the nursery, so it may
# not be useful to optimize the other case too much.
#
# First, if 'addr' appears to be a pointer to some place within
# the nursery, return True
if not self.translated_to_c:
# When non-translated, filter out tagged pointers explicitly.
# When translated, it may occasionally give a wrong answer
# of True if 'addr' is a tagged pointer with just the wrong value.
if not self.is_valid_gc_object(addr):
return False
if self.nursery <= addr < self.nursery_real_top:
return True # addr is in the nursery
#
# Else, it may be in the set 'young_rawmalloced_objects'
return (bool(self.young_rawmalloced_objects) and
self.young_rawmalloced_objects.contains(addr))
appears_to_be_young._always_inline_ = True
def debug_is_old_object(self, addr):
return (self.is_valid_gc_object(addr)
and not self.appears_to_be_young(addr))
def is_forwarded(self, obj):
"""Returns True if the nursery obj is marked as forwarded.
Implemented a bit obscurely by checking an unrelated flag
that can never be set on a young object -- except if tid == -42.
"""
assert self.is_in_nursery(obj)
tid = self.header(obj).tid
result = (tid & GCFLAG_FINALIZATION_ORDERING != 0)
if result:
ll_assert(tid == -42, "bogus header for young obj")
else:
ll_assert(bool(tid), "bogus header (1)")
ll_assert(tid & -_GCFLAG_FIRST_UNUSED == 0, "bogus header (2)")
return result
def get_forwarding_address(self, obj):
return llmemory.cast_adr_to_ptr(obj, FORWARDSTUBPTR).forw
def get_possibly_forwarded_type_id(self, obj):
if self.is_in_nursery(obj) and self.is_forwarded(obj):
obj = self.get_forwarding_address(obj)
return self.get_type_id(obj)
def get_total_memory_used(self):
"""Return the total memory used, not counting any object in the
nursery: only objects in the ArenaCollection or raw-malloced.
"""
return self.ac.total_memory_used + self.rawmalloced_total_size
def card_marking_words_for_length(self, length):
# --- Unoptimized version:
#num_bits = ((length-1) >> self.card_page_shift) + 1
#return (num_bits + (LONG_BIT - 1)) >> LONG_BIT_SHIFT
# --- Optimized version:
return intmask(
((r_uint(length) + r_uint((LONG_BIT << self.card_page_shift) - 1)) >>
(self.card_page_shift + LONG_BIT_SHIFT)))
def card_marking_bytes_for_length(self, length):
# --- Unoptimized version:
#num_bits = ((length-1) >> self.card_page_shift) + 1
#return (num_bits + 7) >> 3
# --- Optimized version:
return intmask(
((r_uint(length) + r_uint((8 << self.card_page_shift) - 1)) >>
(self.card_page_shift + 3)))
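# Standalone check that the optimized formulas above agree with the
# commented-out unoptimized versions, assuming a 64-bit word
# (LONG_BIT=64, LONG_BIT_SHIFT=6) and card_page_shift=7 (i.e. the
# default card_page_indices of 128):
for length in range(1, 20000, 37):
    num_bits = ((length - 1) >> 7) + 1
    assert (num_bits + 63) >> 6 == (length + (64 << 7) - 1) >> (7 + 6)
    assert (num_bits + 7) >> 3 == (length + (8 << 7) - 1) >> (7 + 3)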
def debug_check_consistency(self):
if self.DEBUG:
ll_assert(not self.young_rawmalloced_objects,
"young raw-malloced objects in a major collection")
ll_assert(not self.young_objects_with_weakrefs.non_empty(),
"young objects with weakrefs in a major collection")
MovingGCBase.debug_check_consistency(self)
def debug_check_object(self, obj):
# after a minor or major collection, no object should be in the nursery
ll_assert(not self.is_in_nursery(obj),
"object in nursery after collection")
# similarly, all objects should have this flag, except if they
# don't have any GC pointer
typeid = self.get_type_id(obj)
if self.has_gcptr(typeid):
ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0,
"missing GCFLAG_TRACK_YOUNG_PTRS")
# the GCFLAG_VISITED should not be set between collections
ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0,
"unexpected GCFLAG_VISITED")
# the GCFLAG_FINALIZATION_ORDERING should not be set between coll.
ll_assert(self.header(obj).tid & GCFLAG_FINALIZATION_ORDERING == 0,
"unexpected GCFLAG_FINALIZATION_ORDERING")
# the GCFLAG_CARDS_SET should not be set between collections
ll_assert(self.header(obj).tid & GCFLAG_CARDS_SET == 0,
"unexpected GCFLAG_CARDS_SET")
# if the GCFLAG_HAS_CARDS is set, check that all bits are zero now
if self.header(obj).tid & GCFLAG_HAS_CARDS:
if self.card_page_indices <= 0:
ll_assert(False, "GCFLAG_HAS_CARDS but not using card marking")
return
typeid = self.get_type_id(obj)
ll_assert(self.has_gcptr_in_varsize(typeid),
"GCFLAG_HAS_CARDS but not has_gcptr_in_varsize")
ll_assert(self.header(obj).tid & GCFLAG_NO_HEAP_PTRS == 0,
"GCFLAG_HAS_CARDS && GCFLAG_NO_HEAP_PTRS")
offset_to_length = self.varsize_offset_to_length(typeid)
length = (obj + offset_to_length).signed[0]
extra_words = self.card_marking_words_for_length(length)
#
size_gc_header = self.gcheaderbuilder.size_gc_header
p = llarena.getfakearenaaddress(obj - size_gc_header)
i = extra_words * WORD
while i > 0:
p -= 1
ll_assert(p.char[0] == '\x00',
"the card marker bits are not cleared")
i -= 1
# ----------
# Write barrier
# for the JIT: a minimal description of the write_barrier() method
# (the JIT assumes it is of the shape
# "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()")
JIT_WB_IF_FLAG = GCFLAG_TRACK_YOUNG_PTRS
# for the JIT to generate custom code corresponding to the array
# write barrier for the simplest case of cards. If JIT_CARDS_SET
# is already set on an object, it will execute code like this:
# MOV eax, index
# SHR eax, JIT_WB_CARD_PAGE_SHIFT
# XOR eax, -8
# BTS [object], eax
if TRANSLATION_PARAMS['card_page_indices'] > 0:
JIT_WB_CARDS_SET = GCFLAG_CARDS_SET
JIT_WB_CARD_PAGE_SHIFT = 1
while ((1 << JIT_WB_CARD_PAGE_SHIFT) !=
TRANSLATION_PARAMS['card_page_indices']):
JIT_WB_CARD_PAGE_SHIFT += 1
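# The while-loop above simply computes log2(card_page_indices);
# standalone check with the default value of 128 == 2**7:
shift = 1
while (1 << shift) != 128:
    shift += 1
assert shift == 7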
@classmethod
def JIT_max_size_of_young_obj(cls):
return cls.TRANSLATION_PARAMS['large_object']
@classmethod
def JIT_minimal_size_in_nursery(cls):
return cls.minimal_size_in_nursery
def write_barrier(self, addr_struct):
if self.header(addr_struct).tid & GCFLAG_TRACK_YOUNG_PTRS:
self.remember_young_pointer(addr_struct)
def write_barrier_from_array(self, addr_array, index):
if self.header(addr_array).tid & GCFLAG_TRACK_YOUNG_PTRS:
if self.card_page_indices > 0: # <- constant-folded
self.remember_young_pointer_from_array2(addr_array, index)
else:
self.remember_young_pointer(addr_array)
def _init_writebarrier_logic(self):
DEBUG = self.DEBUG
# The purpose of attaching remember_young_pointer to the instance
# instead of keeping it as a regular method is to
# make the code in write_barrier() marginally smaller
# (which is important because it is inlined *everywhere*).
def remember_young_pointer(addr_struct):
# 'addr_struct' is the address of the object in which we write.
# We know that 'addr_struct' has GCFLAG_TRACK_YOUNG_PTRS so far.
#
if DEBUG: # note: PYPY_GC_DEBUG=1 does not enable this
ll_assert(self.debug_is_old_object(addr_struct) or
self.header(addr_struct).tid & GCFLAG_HAS_CARDS != 0,
"young object with GCFLAG_TRACK_YOUNG_PTRS and no cards")
#
# We need to remove the flag GCFLAG_TRACK_YOUNG_PTRS and add
# the object to the list 'old_objects_pointing_to_young'.
# We know that 'addr_struct' cannot be in the nursery,
# because nursery objects never have the flag
# GCFLAG_TRACK_YOUNG_PTRS to start with. Note that in
# theory we don't need to do that if the pointer that we're
# writing into the object isn't pointing to a young object.
# However, it isn't really a win, because then sometimes
# we're going to call this function a lot of times for the
# same object; moreover we'd need to pass the 'newvalue' as
# an argument here. The JIT has always called a
# 'newvalue'-less version, too.
self.old_objects_pointing_to_young.append(addr_struct)
objhdr = self.header(addr_struct)
objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
#
# Second part: if 'addr_struct' is actually a prebuilt GC
# object and it's the first time we see a write to it, we
# add it to the list 'prebuilt_root_objects'.
if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
self.prebuilt_root_objects.append(addr_struct)
remember_young_pointer._dont_inline_ = True
self.remember_young_pointer = remember_young_pointer
#
if self.card_page_indices > 0:
self._init_writebarrier_with_card_marker()
def _init_writebarrier_with_card_marker(self):
DEBUG = self.DEBUG
def remember_young_pointer_from_array2(addr_array, index):
# 'addr_array' is the address of the object in which we write,
# which must have an array part; 'index' is the index of the
# item that is (or contains) the pointer that we write.
# We know that 'addr_array' has GCFLAG_TRACK_YOUNG_PTRS so far.
#
objhdr = self.header(addr_array)
if objhdr.tid & GCFLAG_HAS_CARDS == 0:
#
if DEBUG: # note: PYPY_GC_DEBUG=1 does not enable this
ll_assert(self.debug_is_old_object(addr_array),
"young array with no card but GCFLAG_TRACK_YOUNG_PTRS")
#
# no cards, use default logic. Mostly copied from above.
self.old_objects_pointing_to_young.append(addr_array)
objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
self.prebuilt_root_objects.append(addr_array)
return
#
# 'addr_array' is a raw_malloc'ed array with card markers
# in front. Compute the index of the bit to set:
bitindex = index >> self.card_page_shift
byteindex = bitindex >> 3
bitmask = 1 << (bitindex & 7)
#
# If the bit is already set, leave now.
addr_byte = self.get_card(addr_array, byteindex)
byte = ord(addr_byte.char[0])
if byte & bitmask:
return
#
# We set the flag (even if the newly written address does not
# actually point to the nursery, which seems to be ok -- actually
# it seems more important that remember_young_pointer_from_array2()
# does not take 3 arguments).
addr_byte.char[0] = chr(byte | bitmask)
#
if objhdr.tid & GCFLAG_CARDS_SET == 0:
self.old_objects_with_cards_set.append(addr_array)
objhdr.tid |= GCFLAG_CARDS_SET
remember_young_pointer_from_array2._dont_inline_ = True
assert self.card_page_indices > 0
self.remember_young_pointer_from_array2 = (
remember_young_pointer_from_array2)
def jit_remember_young_pointer_from_array(addr_array):
# minimal version of the above, with just one argument,
# called by the JIT when GCFLAG_TRACK_YOUNG_PTRS is set
# but GCFLAG_CARDS_SET is cleared. This tries to set
# GCFLAG_CARDS_SET if possible; otherwise, it falls back
# to remember_young_pointer().
objhdr = self.header(addr_array)
if objhdr.tid & GCFLAG_HAS_CARDS:
self.old_objects_with_cards_set.append(addr_array)
objhdr.tid |= GCFLAG_CARDS_SET
else:
self.remember_young_pointer(addr_array)
self.jit_remember_young_pointer_from_array = (
jit_remember_young_pointer_from_array)
def get_card(self, obj, byteindex):
size_gc_header = self.gcheaderbuilder.size_gc_header
addr_byte = obj - size_gc_header
return llarena.getfakearenaaddress(addr_byte) + (~byteindex)
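# Note on the ~byteindex trick in get_card() above: ~b == -(b + 1), so
# card byte 0 sits one byte before the GC header, byte 1 two bytes
# before it, and so on -- the card bitfield grows downwards from the
# header. Standalone check:
assert all(~b == -(b + 1) for b in range(8))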
def writebarrier_before_copy(self, source_addr, dest_addr,
source_start, dest_start, length):
""" This has the same effect as calling writebarrier over
each element in dest copied from source, except it might reset
one of the following flags a bit too eagerly, which means we'll have
a bit more objects to track, but being on the safe side.
"""
source_hdr = self.header(source_addr)
dest_hdr = self.header(dest_addr)
if dest_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
return True
# ^^^ a fast path of write-barrier
#
if source_hdr.tid & GCFLAG_HAS_CARDS != 0:
#
if source_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
# The source object may have random young pointers.
# Return False to mean "do it manually in ll_arraycopy".
return False
#
if source_hdr.tid & GCFLAG_CARDS_SET == 0:
# The source object has no young pointers at all. Done.
return True
#
if dest_hdr.tid & GCFLAG_HAS_CARDS == 0:
# The dest object doesn't have cards. Do it manually.
return False
#
if source_start != 0 or dest_start != 0:
# Misaligned. Do it manually.
return False
#
self.manually_copy_card_bits(source_addr, dest_addr, length)
return True
#
if source_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
# there might be in source a pointer to a young object
self.old_objects_pointing_to_young.append(dest_addr)
dest_hdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
#
if dest_hdr.tid & GCFLAG_NO_HEAP_PTRS:
if source_hdr.tid & GCFLAG_NO_HEAP_PTRS == 0:
dest_hdr.tid &= ~GCFLAG_NO_HEAP_PTRS
self.prebuilt_root_objects.append(dest_addr)
return True
def manually_copy_card_bits(self, source_addr, dest_addr, length):
# manually copy the individual card marks from source to dest
assert self.card_page_indices > 0
bytes = self.card_marking_bytes_for_length(length)
#
anybyte = 0
i = 0
while i < bytes:
addr_srcbyte = self.get_card(source_addr, i)
addr_dstbyte = self.get_card(dest_addr, i)
byte = ord(addr_srcbyte.char[0])
anybyte |= byte
addr_dstbyte.char[0] = chr(ord(addr_dstbyte.char[0]) | byte)
i += 1
#
if anybyte:
dest_hdr = self.header(dest_addr)
if dest_hdr.tid & GCFLAG_CARDS_SET == 0:
self.old_objects_with_cards_set.append(dest_addr)
dest_hdr.tid |= GCFLAG_CARDS_SET
def register_finalizer(self, fq_index, gcobj):
from rpython.rtyper.lltypesystem import rffi
obj = llmemory.cast_ptr_to_adr(gcobj)
fq_index = rffi.cast(llmemory.Address, fq_index)
self.probably_young_objects_with_finalizers.append(obj)
self.probably_young_objects_with_finalizers.append(fq_index)
# ----------
# Nursery collection
def minor_collection(self):
"""Perform a minor collection: find the objects from the nursery
that remain alive and move them out."""
#
debug_start("gc-minor")
#
# Before everything else, remove from 'old_objects_pointing_to_young'
# the young arrays.
if self.young_rawmalloced_objects:
self.remove_young_arrays_from_old_objects_pointing_to_young()
#
# First, find the roots that point to young objects. All nursery
# objects found are copied out of the nursery, and the occasional
# young raw-malloced object is flagged with GCFLAG_VISITED.
# Note that during this step, we ignore references to further
# young objects; only objects directly referenced by roots
# are copied out or flagged. They are also added to the list
# 'old_objects_pointing_to_young'.
self.collect_roots_in_nursery()
#
# visit the "probably young" objects with finalizers. They
# always all survive.
if self.probably_young_objects_with_finalizers.non_empty():
self.deal_with_young_objects_with_finalizers()
#
while True:
# If we are using card marking, do a partial trace of the arrays
# that are flagged with GCFLAG_CARDS_SET.
if self.card_page_indices > 0:
self.collect_cardrefs_to_nursery()
#
# Now trace objects from 'old_objects_pointing_to_young'.
# All nursery objects they reference are copied out of the
# nursery, and again added to 'old_objects_pointing_to_young'.
# All young raw-malloced object found are flagged GCFLAG_VISITED.
# We proceed until 'old_objects_pointing_to_young' is empty.
self.collect_oldrefs_to_nursery()
#
# We have to loop back if collect_oldrefs_to_nursery caused
# new objects to show up in old_objects_with_cards_set
if self.card_page_indices > 0:
if self.old_objects_with_cards_set.non_empty():
continue
break
#
# Now all live nursery objects should be out. Update the young
# weakrefs' targets.
if self.young_objects_with_weakrefs.non_empty():
self.invalidate_young_weakrefs()
if self.young_objects_with_destructors.non_empty():
self.deal_with_young_objects_with_destructors()
#
# Clear this mapping.
if self.nursery_objects_shadows.length() > 0:
self.nursery_objects_shadows.clear()
#
# Walk the list of young raw-malloced objects, and either free
# them or make them old.
if self.young_rawmalloced_objects:
self.free_young_rawmalloced_objects()
#
# All live nursery objects are out, and the rest dies. Fill
# the nursery up to the cleanup point with zeros
llarena.arena_reset(self.nursery, self.nursery_size, 0)
llarena.arena_reset(self.nursery, self.initial_cleanup, 2)
self.debug_rotate_nursery()
self.nursery_free = self.nursery
self.nursery_top = self.nursery + self.initial_cleanup
self.nursery_real_top = self.nursery + self.nursery_size
#
debug_print("minor collect, total memory used:",
self.get_total_memory_used())
if self.DEBUG >= 2:
self.debug_check_consistency() # expensive!
debug_stop("gc-minor")
def collect_roots_in_nursery(self):
# we don't need to trace prebuilt GcStructs during a minor collect:
# if a prebuilt GcStruct contains a pointer to a young object,
# then the write_barrier must have ensured that the prebuilt
# GcStruct is in the list self.old_objects_pointing_to_young.
debug_start("gc-minor-walkroots")
self.root_walker.walk_roots(
MiniMarkGC._trace_drag_out1, # stack roots
MiniMarkGC._trace_drag_out1, # static in prebuilt non-gc
None, # static in prebuilt gc
is_minor=True)
debug_stop("gc-minor-walkroots")
def collect_cardrefs_to_nursery(self):
size_gc_header = self.gcheaderbuilder.size_gc_header
oldlist = self.old_objects_with_cards_set
while oldlist.non_empty():
obj = oldlist.pop()
#
# Remove the GCFLAG_CARDS_SET flag.
ll_assert(self.header(obj).tid & GCFLAG_CARDS_SET != 0,
"!GCFLAG_CARDS_SET but object in 'old_objects_with_cards_set'")
self.header(obj).tid &= ~GCFLAG_CARDS_SET
#
# Get the number of card marker bytes in the header.
typeid = self.get_type_id(obj)
offset_to_length = self.varsize_offset_to_length(typeid)
length = (obj + offset_to_length).signed[0]
bytes = self.card_marking_bytes_for_length(length)
p = llarena.getfakearenaaddress(obj - size_gc_header)
#
# If the object doesn't have GCFLAG_TRACK_YOUNG_PTRS, then it
# means that it is in 'old_objects_pointing_to_young' and
# will be fully traced by collect_oldrefs_to_nursery() just
# afterwards.
if self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
#
# In that case, we just have to reset all card bits.
while bytes > 0:
p -= 1
p.char[0] = '\x00'
bytes -= 1
#
else:
# Walk the bytes encoding the card marker bits, and for
# each bit set, call trace_and_drag_out_of_nursery_partial().
interval_start = 0
while bytes > 0:
p -= 1
cardbyte = ord(p.char[0])
p.char[0] = '\x00' # reset the bits
bytes -= 1
next_byte_start = interval_start + 8*self.card_page_indices
#
while cardbyte != 0:
interval_stop = interval_start + self.card_page_indices
#
if cardbyte & 1:
if interval_stop > length:
interval_stop = length
ll_assert(cardbyte <= 1 and bytes == 0,
"premature end of object")
self.trace_and_drag_out_of_nursery_partial(
obj, interval_start, interval_stop)
#
interval_start = interval_stop
cardbyte >>= 1
interval_start = next_byte_start
def collect_oldrefs_to_nursery(self):
# Follow the old_objects_pointing_to_young list and move the
# young objects they point to out of the nursery.
oldlist = self.old_objects_pointing_to_young
while oldlist.non_empty():
obj = oldlist.pop()
#
# Check that the flags are correct: we must not have
# GCFLAG_TRACK_YOUNG_PTRS so far.
ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS == 0,
"old_objects_pointing_to_young contains obj with "
"GCFLAG_TRACK_YOUNG_PTRS")
#
# Add the flag GCFLAG_TRACK_YOUNG_PTRS. All live objects should
# have this flag set after a nursery collection.
self.header(obj).tid |= GCFLAG_TRACK_YOUNG_PTRS
#
# Trace the 'obj' to replace pointers to nursery with pointers
# outside the nursery, possibly forcing nursery objects out
# and adding them to 'old_objects_pointing_to_young' as well.
self.trace_and_drag_out_of_nursery(obj)
def trace_and_drag_out_of_nursery(self, obj):
"""obj must not be in the nursery. This copies all the
young objects it references out of the nursery.
"""
self.trace(obj, self._trace_drag_out, None)
def trace_and_drag_out_of_nursery_partial(self, obj, start, stop):
"""Like trace_and_drag_out_of_nursery(), but limited to the array
indices in range(start, stop).
"""
ll_assert(start < stop, "empty or negative range "
"in trace_and_drag_out_of_nursery_partial()")
#print 'trace_partial:', start, stop, '\t', obj
self.trace_partial(obj, start, stop, self._trace_drag_out, None)
def _trace_drag_out1(self, root):
self._trace_drag_out(root, None)
def _trace_drag_out(self, root, ignored):
obj = root.address[0]
#print '_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj)
#
# If 'obj' is not in the nursery, nothing to change -- except
# that we must set GCFLAG_VISITED on young raw-malloced objects.
if not self.is_in_nursery(obj):
# cache usage trade-off: I think that it is a better idea to
# check if 'obj' is in young_rawmalloced_objects with an access
# to this (small) dictionary, rather than risk a lot of cache
# misses by reading a flag in the header of all the 'objs' that
# arrive here.
if (bool(self.young_rawmalloced_objects)
and self.young_rawmalloced_objects.contains(obj)):
self._visit_young_rawmalloced_object(obj)
return
#
size_gc_header = self.gcheaderbuilder.size_gc_header
if self.header(obj).tid & GCFLAG_HAS_SHADOW == 0:
#
# Common case: 'obj' was not already forwarded (otherwise
# tid == -42, containing all flags), and it doesn't have the
# HAS_SHADOW flag either. We must move it out of the nursery,
# into a new nonmovable location.
totalsize = size_gc_header + self.get_size(obj)
newhdr = self._malloc_out_of_nursery(totalsize)
#
elif self.is_forwarded(obj):
#
# 'obj' was already forwarded. Change the original reference
# to point to its forwarding address, and we're done.
root.address[0] = self.get_forwarding_address(obj)
return
#
else:
# First visit to an object that has already a shadow.
newobj = self.nursery_objects_shadows.get(obj)
ll_assert(newobj != NULL, "GCFLAG_HAS_SHADOW but no shadow found")
newhdr = newobj - size_gc_header
#
# Remove the flag GCFLAG_HAS_SHADOW, so that it doesn't get
# copied to the shadow itself.
self.header(obj).tid &= ~GCFLAG_HAS_SHADOW
#
totalsize = size_gc_header + self.get_size(obj)
#
# Copy it. Note that references to other objects in the
# nursery are kept unchanged in this step.
llmemory.raw_memcopy(obj - size_gc_header, newhdr, totalsize)
#
# Set the old object's tid to -42 (containing all flags) and
# replace the old object's content with the target address.
# A bit of no-ops to convince llarena that we are changing
# the layout, in non-translated versions.
typeid = self.get_type_id(obj)
obj = llarena.getfakearenaaddress(obj)
llarena.arena_reset(obj - size_gc_header, totalsize, 0)
llarena.arena_reserve(obj - size_gc_header,
size_gc_header + llmemory.sizeof(FORWARDSTUB))
self.header(obj).tid = -42
newobj = newhdr + size_gc_header
llmemory.cast_adr_to_ptr(obj, FORWARDSTUBPTR).forw = newobj
#
# Change the original pointer to this object.
root.address[0] = newobj
#
# Add the newobj to the list 'old_objects_pointing_to_young',
# because it can contain further pointers to other young objects.
# We will fix such references to point to the copy of the young
# objects when we walk 'old_objects_pointing_to_young'.
if self.has_gcptr(typeid):
# we only have to do it if we have any gcptrs
self.old_objects_pointing_to_young.append(newobj)
_trace_drag_out._always_inline_ = True
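# Illustrative, simplified model of the forwarding protocol implemented
# in _trace_drag_out() above, using plain Python dicts and made-up
# addresses (not part of the GC): the first visit copies the object out
# and overwrites the old location with a -42 header plus the new
# address; later visits just follow that address.
def _forwarding_demo():
    nursery = {0x10: ('hdr', 'payload')}
    old_space = {}
    def drag_out(addr):
        hdr, rest = nursery[addr]
        if hdr == -42:                        # already forwarded: follow stub
            return rest
        newaddr = 0x1000 + len(old_space)     # "allocate" outside the nursery
        old_space[newaddr] = (hdr, rest)      # copy the object out
        nursery[addr] = (-42, newaddr)        # leave a forwarding stub behind
        return newaddr
    first = drag_out(0x10)
    again = drag_out(0x10)                    # second visit follows the stub
    assert first == again == 0x1000
_forwarding_demo()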
def _visit_young_rawmalloced_object(self, obj):
# 'obj' points to a young, raw-malloced object.
# Any young rawmalloced object never seen by the code here
# will end up without GCFLAG_VISITED, and be freed at the
# end of the current minor collection. Note that there was
# a bug in which dying young arrays with card marks would
# still be scanned before being freed, keeping a lot of
# objects unnecessarily alive.
hdr = self.header(obj)
if hdr.tid & GCFLAG_VISITED:
return
hdr.tid |= GCFLAG_VISITED
#
# we just made 'obj' old, so we need to add it to the correct lists
added_somewhere = False
#
if hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
self.old_objects_pointing_to_young.append(obj)
added_somewhere = True
#
if hdr.tid & GCFLAG_HAS_CARDS != 0:
ll_assert(hdr.tid & GCFLAG_CARDS_SET != 0,
"young array: GCFLAG_HAS_CARDS without GCFLAG_CARDS_SET")
self.old_objects_with_cards_set.append(obj)
added_somewhere = True
#
ll_assert(added_somewhere, "wrong flag combination on young array")
def _malloc_out_of_nursery(self, totalsize):
"""Allocate non-movable memory for an object of the given
'totalsize' that lives so far in the nursery."""
if raw_malloc_usage(totalsize) <= self.small_request_threshold:
# most common path
return self.ac.malloc(totalsize)
else:
# for nursery objects that are not small
return self._malloc_out_of_nursery_nonsmall(totalsize)
_malloc_out_of_nursery._always_inline_ = True
def _malloc_out_of_nursery_nonsmall(self, totalsize):
# 'totalsize' should be aligned.
ll_assert(raw_malloc_usage(totalsize) & (WORD-1) == 0,
"misaligned totalsize in _malloc_out_of_nursery_nonsmall")
#
arena = llarena.arena_malloc(raw_malloc_usage(totalsize), False)
if not arena:
raise MemoryError("cannot allocate object")
llarena.arena_reserve(arena, totalsize)
#
size_gc_header = self.gcheaderbuilder.size_gc_header
self.rawmalloced_total_size += r_uint(raw_malloc_usage(totalsize))
self.old_rawmalloced_objects.append(arena + size_gc_header)
return arena
def free_young_rawmalloced_objects(self):
self.young_rawmalloced_objects.foreach(
self._free_young_rawmalloced_obj, None)
self.young_rawmalloced_objects.delete()
self.young_rawmalloced_objects = self.null_address_dict()
def _free_young_rawmalloced_obj(self, obj, ignored1, ignored2):
# If 'obj' has GCFLAG_VISITED, it was seen by _trace_drag_out
# and survives. Otherwise, it dies.
self.free_rawmalloced_object_if_unvisited(obj)
def remove_young_arrays_from_old_objects_pointing_to_young(self):
old = self.old_objects_pointing_to_young
new = self.AddressStack()
while old.non_empty():
obj = old.pop()
if not self.young_rawmalloced_objects.contains(obj):
new.append(obj)
# an extra copy, to avoid assignments to
# 'self.old_objects_pointing_to_young'
while new.non_empty():
old.append(new.pop())
new.delete()
# ----------
# Full collection
def major_collection(self, reserving_size=0):
"""Do a major collection. Only for when the nursery is empty."""
#
debug_start("gc-collect")
debug_print()
debug_print(".----------- Full collection ------------------")
debug_print("| used before collection:")
debug_print("| in ArenaCollection: ",
self.ac.total_memory_used, "bytes")
debug_print("| raw_malloced: ",
self.rawmalloced_total_size, "bytes")
#
# Debugging checks
ll_assert(self.nursery_free == self.nursery,
"nursery not empty in major_collection()")
self.debug_check_consistency()
#
# Note that a major collection is non-moving. The goal is only to
# find and free some of the objects allocated by the ArenaCollection.
# We first visit all objects and toggle the flag GCFLAG_VISITED on
# them, starting from the roots.
self.objects_to_trace = self.AddressStack()
self.collect_roots()
self.visit_all_objects()
#
# Finalizer support: adds the flag GCFLAG_VISITED to all objects
# with a finalizer and all objects reachable from there (and also
# moves some objects from 'objects_with_finalizers' to
# 'run_finalizers').
if self.old_objects_with_finalizers.non_empty():
self.deal_with_objects_with_finalizers()
#
self.objects_to_trace.delete()
#
# Weakref support: clear the weak pointers to dying objects
if self.old_objects_with_weakrefs.non_empty():
self.invalidate_old_weakrefs()
if self.old_objects_with_destructors.non_empty():
self.deal_with_old_objects_with_destructors()
#
# Walk all rawmalloced objects and free the ones that don't
# have the GCFLAG_VISITED flag.
self.free_unvisited_rawmalloc_objects()
#
# Ask the ArenaCollection to visit all objects. Free the ones
# that have not been visited above, and reset GCFLAG_VISITED on
# the others.
self.ac.mass_free(self._free_if_unvisited)
#
# We also need to reset the GCFLAG_VISITED on prebuilt GC objects.
self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None)
#
self.debug_check_consistency()
#
self.num_major_collects += 1
debug_print("| used after collection:")
debug_print("| in ArenaCollection: ",
self.ac.total_memory_used, "bytes")
debug_print("| raw_malloced: ",
self.rawmalloced_total_size, "bytes")
debug_print("| number of major collects: ",
self.num_major_collects)
debug_print("`----------------------------------------------")
debug_stop("gc-collect")
#
# Set the threshold for the next major collection to be when we
# have allocated 'major_collection_threshold' times more than
# we currently have -- but no more than 'max_delta' more than
# we currently have.
total_memory_used = float(self.get_total_memory_used())
bounded = self.set_major_threshold_from(
min(total_memory_used * self.major_collection_threshold,
total_memory_used + self.max_delta),
reserving_size)
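        # Worked illustration (hypothetical numbers): with 100 MB currently
        # in use, major_collection_threshold=2.0 and max_delta=50 MB, the
        # value passed above is min(200 MB, 150 MB) = 150 MB; the final
        # threshold also accounts for 'reserving_size' inside
        # set_major_threshold_from().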
#
# Max heap size: gives an upper bound on the threshold. If we
# already have at least this much allocated, raise MemoryError.
if bounded and (float(self.get_total_memory_used()) + reserving_size >=
self.next_major_collection_initial):
#
# First raise MemoryError, giving the program a chance to
# quit cleanly. It might still allocate in the nursery,
# which might eventually be emptied, triggering another
# major collect and (possibly) reaching here again with an
# even higher memory consumption. To prevent it, if it's
# the second time we are here, then abort the program.
if self.max_heap_size_already_raised:
llop.debug_fatalerror(lltype.Void,
"Using too much memory, aborting")
self.max_heap_size_already_raised = True
raise MemoryError
#
# At the end, we can execute the finalizers of the objects
# listed in 'run_finalizers'. Note that this will typically do
# more allocations.
self.execute_finalizers()
def _free_if_unvisited(self, hdr):
size_gc_header = self.gcheaderbuilder.size_gc_header
obj = hdr + size_gc_header
if self.header(obj).tid & GCFLAG_VISITED:
self.header(obj).tid &= ~GCFLAG_VISITED
return False # survives
return True # dies
def _reset_gcflag_visited(self, obj, ignored):
self.header(obj).tid &= ~GCFLAG_VISITED
def free_rawmalloced_object_if_unvisited(self, obj):
if self.header(obj).tid & GCFLAG_VISITED:
self.header(obj).tid &= ~GCFLAG_VISITED # survives
self.old_rawmalloced_objects.append(obj)
else:
size_gc_header = self.gcheaderbuilder.size_gc_header
totalsize = size_gc_header + self.get_size(obj)
allocsize = raw_malloc_usage(totalsize)
arena = llarena.getfakearenaaddress(obj - size_gc_header)
#
# Must also include the card marker area, if any
if (self.card_page_indices > 0 # <- this is constant-folded
and self.header(obj).tid & GCFLAG_HAS_CARDS):
#
# Get the length and compute the number of extra bytes
typeid = self.get_type_id(obj)
ll_assert(self.has_gcptr_in_varsize(typeid),
"GCFLAG_HAS_CARDS but not has_gcptr_in_varsize")
offset_to_length = self.varsize_offset_to_length(typeid)
length = (obj + offset_to_length).signed[0]
extra_words = self.card_marking_words_for_length(length)
arena -= extra_words * WORD
allocsize += extra_words * WORD
#
llarena.arena_free(arena)
self.rawmalloced_total_size -= r_uint(allocsize)
def free_unvisited_rawmalloc_objects(self):
list = self.old_rawmalloced_objects
self.old_rawmalloced_objects = self.AddressStack()
#
while list.non_empty():
self.free_rawmalloced_object_if_unvisited(list.pop())
#
list.delete()
def collect_roots(self):
# Collect all roots. Starts from all the objects
# from 'prebuilt_root_objects'.
self.prebuilt_root_objects.foreach(self._collect_obj,
self.objects_to_trace)
#
# Add the roots from the other sources.
self.root_walker.walk_roots(
MiniMarkGC._collect_ref_stk, # stack roots
MiniMarkGC._collect_ref_stk, # static in prebuilt non-gc structures
None) # we don't need the static in all prebuilt gc objects
#
# If we are in an inner collection caused by a call to a finalizer,
# the 'run_finalizers' objects also need to be kept alive.
self.enum_pending_finalizers(self._collect_obj,
self.objects_to_trace)
def enumerate_all_roots(self, callback, arg):
self.prebuilt_root_objects.foreach(callback, arg)
MovingGCBase.enumerate_all_roots(self, callback, arg)
enumerate_all_roots._annspecialcase_ = 'specialize:arg(1)'
@staticmethod
def _collect_obj(obj, objects_to_trace):
objects_to_trace.append(obj)
def _collect_ref_stk(self, root):
obj = root.address[0]
llop.debug_nonnull_pointer(lltype.Void, obj)
self.objects_to_trace.append(obj)
def _collect_ref_rec(self, root, ignored):
self.objects_to_trace.append(root.address[0])
def visit_all_objects(self):
pending = self.objects_to_trace
while pending.non_empty():
obj = pending.pop()
self.visit(obj)
def visit(self, obj):
#
# 'obj' is a live object. Check GCFLAG_VISITED to know if we
# have already seen it before.
#
# Moreover, we can ignore prebuilt objects with GCFLAG_NO_HEAP_PTRS.
# If they have this flag set, then they cannot point to heap
# objects, so ignoring them is fine. If they don't have this
# flag set, then the object should be in 'prebuilt_root_objects',
# and the GCFLAG_VISITED will be reset at the end of the
# collection.
hdr = self.header(obj)
if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS):
return
#
# It's the first time. We set the flag.
hdr.tid |= GCFLAG_VISITED
if not self.has_gcptr(llop.extract_ushort(llgroup.HALFWORD, hdr.tid)):
return
#
# Trace the content of the object and put all objects it references
# into the 'objects_to_trace' list.
self.trace(obj, self._collect_ref_rec, None)
# ----------
# id() and identityhash() support
def _allocate_shadow(self, obj):
size_gc_header = self.gcheaderbuilder.size_gc_header
size = self.get_size(obj)
shadowhdr = self._malloc_out_of_nursery(size_gc_header +
size)
# Initialize the shadow enough to be considered a
# valid gc object. If the original object stays
# alive at the next minor collection, it will anyway
# be copied over the shadow and overwrite the
# following fields. But if the object dies, then
# the shadow will stay around and only be freed at
# the next major collection, at which point we want
# it to look valid (but ready to be freed).
shadow = shadowhdr + size_gc_header
self.header(shadow).tid = self.header(obj).tid
typeid = self.get_type_id(obj)
if self.is_varsize(typeid):
lenofs = self.varsize_offset_to_length(typeid)
(shadow + lenofs).signed[0] = (obj + lenofs).signed[0]
#
self.header(obj).tid |= GCFLAG_HAS_SHADOW
self.nursery_objects_shadows.setitem(obj, shadow)
return shadow
def _find_shadow(self, obj):
#
# The object is not a tagged pointer, and it is still in the
# nursery. Find or allocate a "shadow" object, which is
# where the object will be moved by the next minor
# collection
if self.header(obj).tid & GCFLAG_HAS_SHADOW:
shadow = self.nursery_objects_shadows.get(obj)
ll_assert(shadow != NULL,
"GCFLAG_HAS_SHADOW but no shadow found")
else:
shadow = self._allocate_shadow(obj)
#
# The answer is the address of the shadow.
return shadow
_find_shadow._dont_inline_ = True
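    # Net effect (informal note): for a nursery object, id() and
    # identityhash() are derived from the address of its shadow, i.e. the
    # place the object will be copied to at the next minor collection, so
    # the returned integer stays stable when the object later moves.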
def id_or_identityhash(self, gcobj):
"""Implement the common logic of id() and identityhash()
of an object, given as a GCREF.
"""
obj = llmemory.cast_ptr_to_adr(gcobj)
if self.is_valid_gc_object(obj):
if self.is_in_nursery(obj):
obj = self._find_shadow(obj)
return llmemory.cast_adr_to_int(obj)
id_or_identityhash._always_inline_ = True
def id(self, gcobj):
return self.id_or_identityhash(gcobj)
def identityhash(self, gcobj):
return mangle_hash(self.id_or_identityhash(gcobj))
# ----------
# Finalizers
def deal_with_young_objects_with_destructors(self):
"""We can reasonably assume that destructors don't do
anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
while self.young_objects_with_destructors.non_empty():
obj = self.young_objects_with_destructors.pop()
if not self.is_forwarded(obj):
self.call_destructor(obj)
else:
obj = self.get_forwarding_address(obj)
self.old_objects_with_destructors.append(obj)
def deal_with_old_objects_with_destructors(self):
"""We can reasonably assume that destructors don't do
anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
new_objects = self.AddressStack()
while self.old_objects_with_destructors.non_empty():
obj = self.old_objects_with_destructors.pop()
if self.header(obj).tid & GCFLAG_VISITED:
# surviving
new_objects.append(obj)
else:
# dying
self.call_destructor(obj)
self.old_objects_with_destructors.delete()
self.old_objects_with_destructors = new_objects
def deal_with_young_objects_with_finalizers(self):
while self.probably_young_objects_with_finalizers.non_empty():
obj = self.probably_young_objects_with_finalizers.popleft()
fq_nr = self.probably_young_objects_with_finalizers.popleft()
self.singleaddr.address[0] = obj
self._trace_drag_out1(self.singleaddr)
obj = self.singleaddr.address[0]
self.old_objects_with_finalizers.append(obj)
self.old_objects_with_finalizers.append(fq_nr)
def deal_with_objects_with_finalizers(self):
# Walk over list of objects with finalizers.
# If it is not surviving, add it to the list of to-be-called
# finalizers and make it survive, to make the finalizer runnable.
# We try to run the finalizers in a "reasonable" order, like
# CPython does. The details of this algorithm are in
# pypy/doc/discussion/finalizer-order.txt.
new_with_finalizer = self.AddressDeque()
marked = self.AddressDeque()
pending = self.AddressStack()
self.tmpstack = self.AddressStack()
while self.old_objects_with_finalizers.non_empty():
x = self.old_objects_with_finalizers.popleft()
fq_nr = self.old_objects_with_finalizers.popleft()
ll_assert(self._finalization_state(x) != 1,
"bad finalization state 1")
if self.header(x).tid & GCFLAG_VISITED:
new_with_finalizer.append(x)
new_with_finalizer.append(fq_nr)
continue
marked.append(x)
marked.append(fq_nr)
pending.append(x)
while pending.non_empty():
y = pending.pop()
state = self._finalization_state(y)
if state == 0:
self._bump_finalization_state_from_0_to_1(y)
self.trace(y, self._append_if_nonnull, pending)
elif state == 2:
self._recursively_bump_finalization_state_from_2_to_3(y)
self._recursively_bump_finalization_state_from_1_to_2(x)
while marked.non_empty():
x = marked.popleft()
fq_nr = marked.popleft()
state = self._finalization_state(x)
ll_assert(state >= 2, "unexpected finalization state < 2")
if state == 2:
from rpython.rtyper.lltypesystem import rffi
fq_index = rffi.cast(lltype.Signed, fq_nr)
self.mark_finalizer_to_run(fq_index, x)
# we must also fix the state from 2 to 3 here, otherwise
# we leave the GCFLAG_FINALIZATION_ORDERING bit behind
# which will confuse the next collection
self._recursively_bump_finalization_state_from_2_to_3(x)
else:
new_with_finalizer.append(x)
new_with_finalizer.append(fq_nr)
self.tmpstack.delete()
pending.delete()
marked.delete()
self.old_objects_with_finalizers.delete()
self.old_objects_with_finalizers = new_with_finalizer
def _append_if_nonnull(pointer, stack):
stack.append(pointer.address[0])
_append_if_nonnull = staticmethod(_append_if_nonnull)
def _finalization_state(self, obj):
tid = self.header(obj).tid
if tid & GCFLAG_VISITED:
if tid & GCFLAG_FINALIZATION_ORDERING:
return 2
else:
return 3
else:
if tid & GCFLAG_FINALIZATION_ORDERING:
return 1
else:
return 0
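    # Summary of the encoding read by _finalization_state() above:
    #   VISITED=0, FINALIZATION_ORDERING=0  -> state 0
    #   VISITED=0, FINALIZATION_ORDERING=1  -> state 1
    #   VISITED=1, FINALIZATION_ORDERING=1  -> state 2
    #   VISITED=1, FINALIZATION_ORDERING=0  -> state 3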
def _bump_finalization_state_from_0_to_1(self, obj):
ll_assert(self._finalization_state(obj) == 0,
"unexpected finalization state != 0")
hdr = self.header(obj)
hdr.tid |= GCFLAG_FINALIZATION_ORDERING
def _recursively_bump_finalization_state_from_2_to_3(self, obj):
ll_assert(self._finalization_state(obj) == 2,
"unexpected finalization state != 2")
pending = self.tmpstack
ll_assert(not pending.non_empty(), "tmpstack not empty")
pending.append(obj)
while pending.non_empty():
y = pending.pop()
hdr = self.header(y)
if hdr.tid & GCFLAG_FINALIZATION_ORDERING: # state 2 ?
hdr.tid &= ~GCFLAG_FINALIZATION_ORDERING # change to state 3
self.trace(y, self._append_if_nonnull, pending)
def _recursively_bump_finalization_state_from_1_to_2(self, obj):
# recursively convert objects from state 1 to state 2.
# The call to visit_all_objects() will add the GCFLAG_VISITED
# recursively.
self.objects_to_trace.append(obj)
self.visit_all_objects()
# ----------
# Weakrefs
# The code relies on the fact that no weakref can be an old object
# weakly pointing to a young object. Indeed, weakrefs are immutable
    # so they cannot point to an object that was created after the weakref itself.
# Thanks to this, during a minor collection, we don't have to fix
# or clear the address stored in old weakrefs.
def invalidate_young_weakrefs(self):
"""Called during a nursery collection."""
# walk over the list of objects that contain weakrefs and are in the
# nursery. if the object it references survives then update the
# weakref; otherwise invalidate the weakref
while self.young_objects_with_weakrefs.non_empty():
obj = self.young_objects_with_weakrefs.pop()
if not self.is_forwarded(obj):
continue # weakref itself dies
obj = self.get_forwarding_address(obj)
offset = self.weakpointer_offset(self.get_type_id(obj))
pointing_to = (obj + offset).address[0]
if self.is_in_nursery(pointing_to):
if self.is_forwarded(pointing_to):
(obj + offset).address[0] = self.get_forwarding_address(
pointing_to)
else:
(obj + offset).address[0] = llmemory.NULL
continue # no need to remember this weakref any longer
#
elif (bool(self.young_rawmalloced_objects) and
self.young_rawmalloced_objects.contains(pointing_to)):
# young weakref to a young raw-malloced object
if self.header(pointing_to).tid & GCFLAG_VISITED:
pass # survives, but does not move
else:
(obj + offset).address[0] = llmemory.NULL
continue # no need to remember this weakref any longer
#
elif self.header(pointing_to).tid & GCFLAG_NO_HEAP_PTRS:
# see test_weakref_to_prebuilt: it's not useful to put
# weakrefs into 'old_objects_with_weakrefs' if they point
# to a prebuilt object (they are immortal). If moreover
# the 'pointing_to' prebuilt object still has the
# GCFLAG_NO_HEAP_PTRS flag, then it's even wrong, because
# 'pointing_to' will not get the GCFLAG_VISITED during
# the next major collection. Solve this by not registering
# the weakref into 'old_objects_with_weakrefs'.
continue
#
self.old_objects_with_weakrefs.append(obj)
def invalidate_old_weakrefs(self):
"""Called during a major collection."""
# walk over list of objects that contain weakrefs
# if the object it references does not survive, invalidate the weakref
new_with_weakref = self.AddressStack()
while self.old_objects_with_weakrefs.non_empty():
obj = self.old_objects_with_weakrefs.pop()
if self.header(obj).tid & GCFLAG_VISITED == 0:
continue # weakref itself dies
offset = self.weakpointer_offset(self.get_type_id(obj))
pointing_to = (obj + offset).address[0]
ll_assert((self.header(pointing_to).tid & GCFLAG_NO_HEAP_PTRS)
== 0, "registered old weakref should not "
"point to a NO_HEAP_PTRS obj")
if self.header(pointing_to).tid & GCFLAG_VISITED:
new_with_weakref.append(obj)
else:
(obj + offset).address[0] = llmemory.NULL
self.old_objects_with_weakrefs.delete()
self.old_objects_with_weakrefs = new_with_weakref
|
{
"content_hash": "19b5e67227f6d26cbe8256fa9621fc76",
"timestamp": "",
"source": "github",
"line_count": 2095,
"max_line_length": 79,
"avg_line_length": 45.75751789976134,
"alnum_prop": 0.5924558219106633,
"repo_name": "oblique-labs/pyVM",
"id": "8cb1a5f9b4456df4626f381ad80e2c72c9d814fc",
"size": "95862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpython/memory/gc/minimark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Awk",
"bytes": "271"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "771638"
},
{
"name": "C++",
"bytes": "12850"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "M4",
"bytes": "12737"
},
{
"name": "Makefile",
"bytes": "35222"
},
{
"name": "Objective-C",
"bytes": "2224"
},
{
"name": "Python",
"bytes": "18329219"
},
{
"name": "Shell",
"bytes": "15396"
},
{
"name": "Vim script",
"bytes": "1107"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask.ext.triangle import Triangle
#from flask.ext.mail import Mail
app = Flask(__name__)
Triangle(app)
# # app.config.update(dict(
# # DEBUG=True,
# # MAIL_SERVER='smtp.gmail.com',
# # MAIL_PORT=465,
# # MAIL_USE_TLS=False,
# # MAIL_USE_SSL=True,
# # MAIL_USERNAME='tanmaydatta@gmail.com',
# # MAIL_PASSWORD=''
# # ))
# mail = Mail(app)
from longclaw import views
|
{
"content_hash": "0d7a574043187eae0ad3d8c909f54349",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 46,
"avg_line_length": 23.77777777777778,
"alnum_prop": 0.6308411214953271,
"repo_name": "nirmitgoyal/longclaw",
"id": "7a0b6d72f9cc1760f6404eae49af0c10a91ff3d6",
"size": "428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "longclaw/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176"
}
],
"symlink_target": ""
}
|
import numpy as np
class StructuredModel(object):
"""Interface definition for Structured Learners.
    This class defines what is necessary to use the structured SVM.
You have to implement at least joint_feature and inference.
"""
def __repr__(self):
return ("%s, size_joint_feature: %d"
% (type(self).__name__, self.size_joint_feature))
def __init__(self):
"""Initialize the model.
        Needs to set self.size_joint_feature, the dimensionality of the joint features for
an instance with labeling (x, y).
"""
self.size_joint_feature = None
def _check_size_w(self, w):
if w.shape != (self.size_joint_feature,):
raise ValueError("Got w of wrong shape. Expected %s, got %s" %
(self.size_joint_feature, w.shape))
def initialize(self, X, Y):
# set any data-specific parameters in the model
pass
def joint_feature(self, x, y):
raise NotImplementedError()
def batch_joint_feature(self, X, Y, Y_true=None):
joint_feature_ = np.zeros(self.size_joint_feature)
if getattr(self, 'rescale_C', False):
for x, y, y_true in zip(X, Y, Y_true):
joint_feature_ += self.joint_feature(x, y, y_true)
else:
for x, y in zip(X, Y):
joint_feature_ += self.joint_feature(x, y)
return joint_feature_
def _loss_augmented_djoint_feature(self, x, y, y_hat, w):
# debugging only!
x_loss_augmented = self.loss_augment(x, y, w)
return (self.joint_feature(x_loss_augmented, y)
- self.joint_feature(x_loss_augmented, y_hat))
def inference(self, x, w, relaxed=None):
raise NotImplementedError()
def batch_inference(self, X, w, relaxed=None):
# default implementation of batch inference
return [self.inference(x, w, relaxed=relaxed)
for x in X]
def loss(self, y, y_hat):
# hamming loss:
if isinstance(y_hat, tuple):
return self.continuous_loss(y, y_hat[0])
if hasattr(self, 'class_weight'):
return np.sum(self.class_weight[y] * (y != y_hat))
return np.sum(y != y_hat)
def batch_loss(self, Y, Y_hat):
# default implementation of batch loss
return [self.loss(y, y_hat) for y, y_hat in zip(Y, Y_hat)]
def max_loss(self, y):
        # maximum possible loss on y for macro averages
if hasattr(self, 'class_weight'):
return np.sum(self.class_weight[y])
return y.size
def continuous_loss(self, y, y_hat):
# continuous version of the loss
# y is the result of linear programming
if y.ndim == 2:
raise ValueError("FIXME!")
gx = np.indices(y.shape)
# all entries minus correct ones
result = 1 - y_hat[gx, y]
if hasattr(self, 'class_weight'):
return np.sum(self.class_weight[y] * result)
return np.sum(result)
def loss_augmented_inference(self, x, y, w, relaxed=None):
print("FALLBACK no loss augmented inference found")
return self.inference(x, w)
def batch_loss_augmented_inference(self, X, Y, w, relaxed=None):
# default implementation of batch loss augmented inference
return [self.loss_augmented_inference(x, y, w, relaxed=relaxed)
for x, y in zip(X, Y)]
def _set_class_weight(self):
if not hasattr(self, 'size_joint_feature'):
# we are not initialized yet
return
if hasattr(self, 'n_labels'):
n_things = self.n_labels
else:
n_things = self.n_states
if self.class_weight is not None:
if len(self.class_weight) != n_things:
raise ValueError("class_weight must have length n_states or"
" be None")
self.class_weight = np.array(self.class_weight)
self.uniform_class_weight = False
else:
self.class_weight = np.ones(n_things)
self.uniform_class_weight = True
|
{
"content_hash": "164ecd74c3821b6d23959fe0532f588f",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 89,
"avg_line_length": 35.689655172413794,
"alnum_prop": 0.5772946859903382,
"repo_name": "amueller/pystruct",
"id": "c63fcdaf8de34c14ebd66a45adfb093229f913f4",
"size": "4140",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pystruct/models/base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "307"
},
{
"name": "Python",
"bytes": "376998"
},
{
"name": "Shell",
"bytes": "3960"
}
],
"symlink_target": ""
}
|
"""Test configuration files"""
import os
import sys
from pyrseas.config import Config
from pyrseas.cmdargs import cmd_parser, parse_args
from pyrseas.yamlutil import yamldump
USER_CFG_DATA = {'database': {'port': 5433},
'output': {'version_comment': True}}
CFG_TABLE_DATA = {'schema public': ['t1', 't2']}
CFG_DATA = {'datacopy': CFG_TABLE_DATA}
CFG_FILE = 'testcfg.yaml'
def test_defaults():
"Create a configuration with defaults"
cfg = Config()
for key in ['audit_columns', 'functions', 'function_templates', 'columns',
'triggers']:
assert key in cfg['augmenter']
for key in ['metadata', 'data']:
assert key in cfg['repository']
def test_user_config(tmpdir):
"Test a user configuration file"
f = tmpdir.join(CFG_FILE)
f.write(yamldump(USER_CFG_DATA))
os.environ["PYRSEAS_USER_CONFIG"] = f.strpath
cfg = Config()
assert cfg['database'] == {'port': 5433}
assert cfg['output'] == {'version_comment': True}
def test_repo_config(tmpdir):
"Test a repository configuration file"
ucfg = tmpdir.join(CFG_FILE)
ucfg.write(yamldump({'repository': {'path': tmpdir.strpath}}))
f = tmpdir.join("config.yaml")
f.write(yamldump(CFG_DATA))
os.environ["PYRSEAS_USER_CONFIG"] = ucfg.strpath
cfg = Config()
assert cfg['datacopy'] == CFG_TABLE_DATA
def test_cmd_parser(tmpdir):
"Test parsing a configuration file specified on the command line"
f = tmpdir.join(CFG_FILE)
f.write(yamldump(CFG_DATA))
sys.argv = ['testprog', 'testdb', '--config', f.strpath]
os.environ["PYRSEAS_USER_CONFIG"] = ''
parser = cmd_parser("Test description", '0.0.1')
cfg = parse_args(parser)
assert cfg['datacopy'] == CFG_TABLE_DATA
def test_parse_repo_config(tmpdir):
"Test parsing a repository configuration file in the current directory"
f = tmpdir.join('config.yaml')
f.write(yamldump(CFG_DATA))
os.chdir(tmpdir.strpath)
sys.argv = ['testprog', 'testdb']
os.environ["PYRSEAS_USER_CONFIG"] = ''
parser = cmd_parser("Test description", '0.0.1')
cfg = parse_args(parser)
assert cfg['datacopy'] == CFG_TABLE_DATA
def test_repo_user_config(tmpdir):
"Test a repository path specified in the user config"
usercfg = {'repository': {'path': tmpdir.strpath}}
userf = tmpdir.join("usercfg.yaml")
userf.write(yamldump(usercfg))
os.environ["PYRSEAS_USER_CONFIG"] = userf.strpath
repof = tmpdir.join("config.yaml")
repof.write(yamldump(CFG_DATA))
cfg = Config()
assert cfg['datacopy'] == CFG_TABLE_DATA
|
{
"content_hash": "a8e254de99be7655ede371bc8e6118b6",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 32.425,
"alnum_prop": 0.6534309946029299,
"repo_name": "perseas/Pyrseas",
"id": "faab758732abc2a871e225e386deac875868b908",
"size": "2618",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "385"
},
{
"name": "PLpgSQL",
"bytes": "55418"
},
{
"name": "Python",
"bytes": "769297"
}
],
"symlink_target": ""
}
|
import sys
import pytest
if sys.version_info >= (3, 3):
from unittest.mock import Mock
ABI = [{}]
ADDRESS = '0xd3CdA913deB6f67967B99D67aCDFa1712C293601'
NON_CHECKSUM_ADDRESS = '0xd3cda913deb6f67967b99d67acdfa1712c293601'
INVALID_CHECKSUM_ADDRESS = '0xd3CDA913deB6f67967B99D67aCDFa1712C293601'
@pytest.mark.parametrize(
'args,kwargs,expected',
(
((ADDRESS,), {}, None),
((INVALID_CHECKSUM_ADDRESS,), {}, ValueError),
((NON_CHECKSUM_ADDRESS,), {}, ValueError),
((), {'address': ADDRESS}, None),
((), {'address': INVALID_CHECKSUM_ADDRESS}, ValueError),
((), {'address': NON_CHECKSUM_ADDRESS}, ValueError),
)
)
def test_contract_address_validation(web3, args, kwargs, expected):
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
web3.eth.contract(*args, **kwargs)
return
# run without errors
web3.eth.contract(*args, **kwargs)
@pytest.mark.skipif(sys.version_info < (3, 3), reason="needs Mock library from 3.3")
def test_set_contract_factory(web3):
factoryClass = Mock()
web3.eth.setContractFactory(factoryClass)
web3.eth.contract(contract_name='myname')
factoryClass.factory.assert_called_once_with(web3, contract_name='myname')
|
{
"content_hash": "73ffe62e033749bea2075a0a437cf52e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 84,
"avg_line_length": 31.75609756097561,
"alnum_prop": 0.6781874039938556,
"repo_name": "pipermerriam/web3.py",
"id": "e60f492fcd90b07e52ca9309138f847816e083ce",
"size": "1302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/eth-module/test_eth_contract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "999"
},
{
"name": "Python",
"bytes": "619517"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PhotoPortfolio.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "bf5285657b58115cced559b5cd32fa79",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 78,
"avg_line_length": 26.11111111111111,
"alnum_prop": 0.7191489361702128,
"repo_name": "tsitra/PhotoPortfolio",
"id": "63e0ee7c020dc650c7132219c2fc60d90fa2b7ce",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37655"
},
{
"name": "JavaScript",
"bytes": "18939"
},
{
"name": "Python",
"bytes": "34850"
}
],
"symlink_target": ""
}
|
from wsstat.clients import WebsocketTestingClient
import asyncio
import datetime
import random
import websockets
import sys
if sys.version_info < (3, 4, 4):
asyncio.ensure_future = getattr(asyncio, 'async')
@asyncio.coroutine
def echo_time(websocket, path):
while True:
now = datetime.datetime.utcnow().isoformat() + 'Z'
try:
yield from websocket.send(now)
yield from asyncio.sleep(random.random() * 3)
except:
pass
class DemoClient(WebsocketTestingClient):
def __init__(self, websocket_url, **kwargs):
super().__init__('ws://127.0.0.1:65432', **kwargs)
def setup_tasks(self):
super().setup_tasks()
start_server = websockets.serve(echo_time, '127.0.0.1', 65432)
asyncio.ensure_future(start_server, loop=self.loop)
|
{
"content_hash": "0e9ebf94328f3c90786dc23d53d99bdc",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 70,
"avg_line_length": 28.517241379310345,
"alnum_prop": 0.6469165659008465,
"repo_name": "Fitblip/wsstat",
"id": "a7d0cbd2cf911daf9817cb70147cee4a0459bac2",
"size": "827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsstat/demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29909"
}
],
"symlink_target": ""
}
|
__author__ = 'liez'
import random
import sqlite3
def make_number(num, length):
str = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
a = []
i = 0
while i < num:
numstr = ''
for j in range(length):
numstr += random.choice(str)
        if numstr not in a:  # if it is not a duplicate
a.append(numstr)
i += 1
print(a)
return a
def save(a):
try:
connect = sqlite3.connect('codelist.db')
    except sqlite3.Error as e:
        print("failed:", e)
        return
cur = connect.cursor()
cur.execute('create table if not exists codes(code char(20) primary key)')
for item in a:
cur.execute('insert into codes values (?)', [item])
print("success")
connect.commit()
cur.close()
connect.close()
save(make_number(20, 10))
|
{
"content_hash": "bda59765aea8d5e88744f5ab1ede843e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 23.38235294117647,
"alnum_prop": 0.5849056603773585,
"repo_name": "Show-Me-the-Code/python",
"id": "a32ef373e8d86ea50aa07b11643e9f922ae5142f",
"size": "822",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Liez-python-code/0002/0002.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3948"
},
{
"name": "C++",
"bytes": "5518"
},
{
"name": "CSS",
"bytes": "3474"
},
{
"name": "HTML",
"bytes": "1218870"
},
{
"name": "Java",
"bytes": "141"
},
{
"name": "JavaScript",
"bytes": "5282"
},
{
"name": "Jupyter Notebook",
"bytes": "324817"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "579400"
}
],
"symlink_target": ""
}
|
from typing import List, Dict
from fedlearner_webconsole.utils.file_manager import FileManagerBase
class FakeFileManager(FileManagerBase):
def can_handle(self, path: str) -> bool:
return path.startswith('fake://')
def ls(self, path: str, recursive=False) -> List[Dict]:
return [{'path': 'fake://data/f1.txt',
'size': 0}]
def move(self, source: str, destination: str) -> bool:
return source.startswith('fake://move')
def remove(self, path: str) -> bool:
return path.startswith('fake://remove')
def copy(self, source: str, destination: str) -> bool:
return source.startswith('fake://copy')
def mkdir(self, path: str) -> bool:
return path.startswith('fake://mkdir')
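# Illustrative use (assumed test pattern): pass an instance of this class
# wherever a FileManagerBase is expected; calls then return canned results,
# e.g. ls() always yields the single 'fake://data/f1.txt' entry, while
# move()/remove()/copy()/mkdir() succeed only for paths carrying the matching
# 'fake://...' prefix.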
|
{
"content_hash": "bb5973f7c927cf9eaecfae2acf0c0786",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 68,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6302631578947369,
"repo_name": "bytedance/fedlearner",
"id": "4a672d32703f745c4d4f4103e1d07362b4c3b4ef",
"size": "1385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_console_v2/api/testing/fake_file_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "25817"
},
{
"name": "CSS",
"bytes": "7766"
},
{
"name": "Dockerfile",
"bytes": "6341"
},
{
"name": "Go",
"bytes": "163506"
},
{
"name": "HTML",
"bytes": "3527"
},
{
"name": "JavaScript",
"bytes": "482972"
},
{
"name": "Less",
"bytes": "14981"
},
{
"name": "Lua",
"bytes": "8088"
},
{
"name": "Makefile",
"bytes": "2869"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Mustache",
"bytes": "35891"
},
{
"name": "Python",
"bytes": "2412335"
},
{
"name": "Shell",
"bytes": "118210"
},
{
"name": "TypeScript",
"bytes": "805827"
}
],
"symlink_target": ""
}
|
import os
import sys
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.types import PublishOptions
if 'MYTICKET' in os.environ and len(sys.argv) > 1:
# principal from command line, ticket from environment variable
PRINCIPAL = sys.argv[1]
PRINCIPAL_TICKET = os.environ['MYTICKET']
else:
raise RuntimeError('missing authid or auth secret (from env var MYTICKET)')
print("Principal '{}' using ticket '{}'".format(PRINCIPAL, PRINCIPAL_TICKET))
class ClientSession(ApplicationSession):
def onConnect(self):
print("Client session connected. Starting WAMP-Ticket authentication on realm '{}' as principal '{}' ..".format(self.config.realm, PRINCIPAL))
self.join(self.config.realm, ["ticket"], PRINCIPAL)
def onChallenge(self, challenge):
if challenge.method == "ticket":
print("WAMP-Ticket challenge received: {}".format(challenge))
return PRINCIPAL_TICKET
else:
raise Exception("Invalid authmethod {}".format(challenge.method))
@inlineCallbacks
def onJoin(self, details):
print("Client session joined: {}".format(details))
## call a procedure we are allowed to call (so this should succeed)
##
try:
res = yield self.call('com.example.add2', 2, 3)
print("call result: {}".format(res))
except Exception as e:
print("call error: {}".format(e))
## (try to) register a procedure where we are not allowed to (so this should fail)
##
try:
reg = yield self.register(lambda x, y: x * y, 'com.example.mul2')
except Exception as e:
print("registration failed (this is expected!) {}".format(e))
## publish to a couple of topics we are allowed to publish to.
##
for topic in [
'com.example.topic1',
'com.foobar.topic1']:
try:
yield self.publish(topic, "hello", options = PublishOptions(acknowledge = True))
print("ok, event published to topic {}".format(topic))
except Exception as e:
print("publication to topic {} failed: {}".format(topic, e))
## (try to) publish to a couple of topics we are not allowed to publish to (so this should fail)
##
for topic in [
'com.example.topic2',
'com.foobar.topic2']:
try:
yield self.publish(topic, "hello", options = PublishOptions(acknowledge = True))
print("ok, event published to topic {}".format(topic))
except Exception as e:
print("publication to topic {} failed (this is expected!) {}".format(topic, e))
self.leave()
def onLeave(self, details):
print("Client session left: {}".format(details))
self.config.extra['exit_details'] = details
self.disconnect()
def onDisconnect(self):
print("Client session disconnected.")
reactor.stop()
if __name__ == '__main__':
from autobahn.twisted.wamp import ApplicationRunner
extra = {
'exit_details': None,
}
runner = ApplicationRunner(url='ws://localhost:8080/ws', realm='realm1', extra=extra)
runner.run(ClientSession)
# CloseDetails(reason=<wamp.error.not_authorized>, message='WAMP-CRA signature is invalid')
print(extra['exit_details'])
if extra['exit_details'].reason != 'wamp.close.normal':
sys.exit(1)
else:
sys.exit(0)
|
{
"content_hash": "e54ad4a4fb8ed1a66d2f57eb089f9d71",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 148,
"avg_line_length": 33.640776699029125,
"alnum_prop": 0.6424242424242425,
"repo_name": "crossbario/crossbarexamples",
"id": "73cb049a07e861b24ea7a863ede83b69b8842ba1",
"size": "3465",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "authentication/ticket/dynamic/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "34842"
},
{
"name": "Batchfile",
"bytes": "5120"
},
{
"name": "C#",
"bytes": "7363"
},
{
"name": "C++",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "739790"
},
{
"name": "Erlang",
"bytes": "8230"
},
{
"name": "HTML",
"bytes": "5779589"
},
{
"name": "Java",
"bytes": "10353"
},
{
"name": "JavaScript",
"bytes": "841304"
},
{
"name": "Jupyter Notebook",
"bytes": "334490"
},
{
"name": "Lua",
"bytes": "1233"
},
{
"name": "Makefile",
"bytes": "34725"
},
{
"name": "PHP",
"bytes": "45760"
},
{
"name": "PLSQL",
"bytes": "402990"
},
{
"name": "PLpgSQL",
"bytes": "5053"
},
{
"name": "Python",
"bytes": "473452"
},
{
"name": "Ruby",
"bytes": "6184"
},
{
"name": "SQLPL",
"bytes": "4983"
},
{
"name": "Shell",
"bytes": "12809"
}
],
"symlink_target": ""
}
|
import os
import sys
import pytest
## 3rd party
import pandas as pd
## package
from pyTecanFluent import Fluent
from pyTecanFluent import Labware
# data dir
test_dir = os.path.join(os.path.dirname(__file__))
data_dir = os.path.join(test_dir, 'data')
# tests
def test_db():
db = Fluent.db()
RackTypes = db.RackTypes()
assert isinstance(RackTypes, list)
RackType = db.RackTypes()[0]
v = db.get_labware(RackType)
assert isinstance(v, dict)
def test_aspirate():
asp = Fluent.Aspirate()
asp.RackLabel = 'test'
asp.RackType = 'test'
assert isinstance(asp.cmd(), str)
def test_dispense():
disp = Fluent.Dispense()
disp.RackLabel = 'test'
disp.RackType = 'test'
assert isinstance(disp.cmd(), str)
def test_comment():
c = Fluent.Comment()
assert isinstance(c.cmd(), str)
def test_waste():
w = Fluent.Waste()
assert isinstance(w.cmd(), str)
def test_gwl():
gwl = Fluent.gwl()
with pytest.raises(AssertionError):
gwl.add(Fluent.Aspirate())
def test_labware():
lw = Labware.labware()
gwl = Fluent.gwl()
ret = lw.add_gwl(gwl)
assert ret is None
|
{
"content_hash": "913245bacb6d579e775622badaa940b0",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 50,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6358974358974359,
"repo_name": "leylabmpi/pyTecanFluent",
"id": "2c1dd536cb537f7a9a20da00220ae187a3f81332",
"size": "1239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_Fluent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "217706"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import Permission
from django.db.models import Q
from mptt.managers import TreeManager
class ItemManager(TreeManager):
"""
Specific manager for mojo.navigation Item models.
"""
def for_slug(self, slug):
"""
Filters items for a specific tree by its slug.
:param slug: Slugified string.
:type slug: str
        Usage example::
            tree_items = Item.objects.for_slug('slug_example')
"""
queryset = super(ItemManager, self).get_queryset()
return queryset.filter(slug=slug).get_descendants(include_self=True)
def for_user(self, user):
"""
Filters items for a specific user and his permissions.
:param user: Django user object instance.
:type user: obj
        Usage example::
tree_items = Item.objects.for_user(request.user)
"""
queryset = super(ItemManager, self).get_queryset()
# if user is superuser, do not filter anything.
if user.is_superuser:
return queryset
# if user is not authenticated remove tree items only set for authenticated users.
if not user.is_active:
queryset = queryset.exclude(access_loggedin=True)
# then add items that user has group access to.
groups = user.groups.all()
queryset = queryset.filter(Q(access_group=None) | Q(access_group__in=groups))
# then add items that user has permission access to.
group_permissions = Permission.objects.filter(group__in=groups)
queryset = queryset.filter(Q(access_permissions=None)
| Q(access_permissions__in=user.user_permissions.all())
| Q(access_permissions__in=group_permissions))
return queryset
|
{
"content_hash": "a0f2011ae32e12189fcc1275863fd9ed",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 90,
"avg_line_length": 35.84313725490196,
"alnum_prop": 0.6214442013129103,
"repo_name": "django-mojo/mojo-navigation",
"id": "149e3a31b0604c628d29c1f53083e9e24a66003f",
"size": "1828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mojo/navigation/managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "27468"
}
],
"symlink_target": ""
}
|
import os
import sys, getopt, argparse
import logging
import json
import time
import requests
class BadCallError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class LatencyTest(object):
def __init__(self):
self.latency_sum = 0
def callRest(self,json,url,key):
params = {}
params["consumer_key"] = key
params["json"] = json
params["jsonpCallback"] = "unused"
time1 = time.time()
r = requests.get(url+"/js/predict",params=params)
time2 = time.time()
self.latency_sum += ((time2-time1)*1000.0)
if r.status_code == requests.codes.ok:
return
else:
raise BadCallError("bad http reponse "+str(r.status_code))
def run(self,json_filename,url,key,num_requests):
reqs = 0
while True:
with open(json_filename) as f:
for line in f:
line = line.rstrip();
data = json.loads(line)
callJson = {"data":data}
callJsonStr = json.dumps(callJson)
self.callRest(callJsonStr,url,key)
reqs += 1
if reqs >= num_requests:
return
def print_stats(self,num_requests):
print "%d calls, avg %0.3f ms" % (num_requests,self.latency_sum/num_requests)
if __name__ == '__main__':
import logging
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(name)s : %(message)s', level=logging.DEBUG)
logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser(prog='create_replay')
parser.add_argument('--json', help='json file', required=True)
parser.add_argument('--server-url', help='server url', required=True)
parser.add_argument('--key', help='js key', required=True)
parser.add_argument('--num-requests', help='number of requests to run',type=int,default=1)
args = parser.parse_args()
opts = vars(args)
lt = LatencyTest()
lt.run(args.json,args.server_url,args.key,args.num_requests)
lt.print_stats(args.num_requests)
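    # Example invocation (hypothetical values):
    #   python jsonLatencyTest.py --json events.json \
    #       --server-url http://localhost:8080 --key my-consumer-key \
    #       --num-requests 100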
|
{
"content_hash": "ad18b94f4e84bed4ed286acb7ee41894",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 107,
"avg_line_length": 32.18840579710145,
"alnum_prop": 0.5790184601530842,
"repo_name": "SeldonIO/seldon-server",
"id": "0ce3b42b4e35094e0a5092528ac30aaded2e951d",
"size": "2221",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docker/grpc-util/python/jsonLatencyTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1926357"
},
{
"name": "JavaScript",
"bytes": "48430"
},
{
"name": "Jupyter Notebook",
"bytes": "112349"
},
{
"name": "Makefile",
"bytes": "56033"
},
{
"name": "Python",
"bytes": "595373"
},
{
"name": "Ruby",
"bytes": "423"
},
{
"name": "Scala",
"bytes": "378790"
},
{
"name": "Shell",
"bytes": "122552"
}
],
"symlink_target": ""
}
|
"""PlannerOverride API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class PlannerOverrideAPI(BaseCanvasAPI):
"""PlannerOverride API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for PlannerOverrideAPI."""
super(PlannerOverrideAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.PlannerOverrideAPI")
def list_planner_items(self, end_date=None, filter=None, start_date=None):
"""
List planner items.
Retrieve the list of objects to be shown on the planner for the current user
with the associated planner override to override an item's visibility if set.
[
{
"context_type": "Course",
"course_id": 1,
"visible_in_planner": true, // Whether or not it is displayed on the student planner
"planner_override": { ... planner override object ... }, // Associated PlannerOverride object if user has toggled visibility for the object on the planner
"submissions": false, // The statuses of the user's submissions for this object
"plannable_id": "123",
"plannable_type": "discussion_topic",
"plannable": { ... discussion topic object },
"html_url": "/courses/1/discussion_topics/8"
},
{
"context_type": "Course",
"course_id": 1,
"visible_in_planner": true,
"planner_override": {
"id": 3,
"plannable_type": "Assignment",
"plannable_id": 1,
"user_id": 2,
"workflow_state": "active",
"marked_complete": true, // A user-defined setting for marking items complete in the planner
"dismissed": false, // A user-defined setting for hiding items from the opportunities list
"deleted_at": null,
"created_at": "2017-05-18T18:35:55Z",
"updated_at": "2017-05-18T18:35:55Z"
},
"submissions": { // The status as it pertains to the current user
"excused": false,
"graded": false,
"late": false,
"missing": true,
"needs_grading": false,
"with_feedback": false
},
"plannable_id": "456",
"plannable_type": "assignment",
"plannable": { ... assignment object ... },
"html_url": "http://canvas.instructure.com/courses/1/assignments/1#submit"
},
{
"visible_in_planner": true,
"planner_override": null,
"submissions": false, // false if no associated assignment exists for the plannable item
"plannable_id": "789",
"plannable_type": "planner_note",
"plannable": {
"id": 1,
"todo_date": "2017-05-30T06:00:00Z",
"title": "hello",
"details": "world",
"user_id": 2,
"course_id": null,
"workflow_state": "active",
"created_at": "2017-05-30T16:29:04Z",
"updated_at": "2017-05-30T16:29:15Z"
},
"html_url": "http://canvas.instructure.com/api/v1/planner_notes.1"
}
]
"""
path = {}
data = {}
params = {}
# OPTIONAL - start_date
"""
Only return items starting from the given date.
The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ.
"""
if start_date is not None:
params["start_date"] = start_date
# OPTIONAL - end_date
"""
Only return items up to the given date.
The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ.
"""
if end_date is not None:
params["end_date"] = end_date
# OPTIONAL - filter
"""
Only return items that have new or unread activity
"""
if filter is not None:
self._validate_enum(filter, ["new_activity"])
params["filter"] = filter
self.logger.debug(
"GET /api/v1/planner/items with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/planner/items".format(**path),
data=data,
params=params,
no_data=True,
)
def list_planner_overrides(self):
"""
List planner overrides.
Retrieve a planner override for the current user
"""
path = {}
data = {}
params = {}
self.logger.debug(
"GET /api/v1/planner/overrides with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/planner/overrides".format(**path),
data=data,
params=params,
all_pages=True,
)
def show_planner_override(self, id):
"""
Show a planner override.
Retrieve a planner override for the current user
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"GET /api/v1/planner/overrides/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/planner/overrides/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
def update_planner_override(self, id, dismissed=None, marked_complete=None):
"""
Update a planner override.
Update a planner override's visibilty for the current user
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - marked_complete
"""
determines whether the planner item is marked as completed
"""
if marked_complete is not None:
data["marked_complete"] = marked_complete
# OPTIONAL - dismissed
"""
determines whether the planner item shows in the opportunities list
"""
if dismissed is not None:
data["dismissed"] = dismissed
self.logger.debug(
"PUT /api/v1/planner/overrides/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"PUT",
"/api/v1/planner/overrides/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
def create_planner_override(
self,
dismissed=None,
marked_complete=None,
plannable_id=None,
plannable_type=None,
):
"""
Create a planner override.
Create a planner override for the current user
"""
path = {}
data = {}
params = {}
# OPTIONAL - plannable_type
"""
Type of the item that you are overriding in the planner
"""
if plannable_type is not None:
self._validate_enum(
plannable_type,
[
"announcement",
"assignment",
"discussion_topic",
"quiz",
"wiki_page",
"planner_note",
],
)
data["plannable_type"] = plannable_type
# OPTIONAL - plannable_id
"""
ID of the item that you are overriding in the planner
"""
if plannable_id is not None:
data["plannable_id"] = plannable_id
# OPTIONAL - marked_complete
"""
If this is true, the item will show in the planner as completed
"""
if marked_complete is not None:
data["marked_complete"] = marked_complete
# OPTIONAL - dismissed
"""
If this is true, the item will not show in the opportunities list
"""
if dismissed is not None:
data["dismissed"] = dismissed
self.logger.debug(
"POST /api/v1/planner/overrides with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/planner/overrides".format(**path),
data=data,
params=params,
single_item=True,
)
def delete_planner_override(self, id):
"""
Delete a planner override.
Delete a planner override for the current user
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"DELETE /api/v1/planner/overrides/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"DELETE",
"/api/v1/planner/overrides/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
class Planneroverride(BaseModel):
"""Planneroverride Model.
User-controlled setting for whether an item should be displayed on the planner or not"""
def __init__(
self,
id=None,
plannable_type=None,
plannable_id=None,
user_id=None,
workflow_state=None,
marked_complete=None,
dismissed=None,
created_at=None,
updated_at=None,
deleted_at=None,
):
"""Init method for Planneroverride class."""
self._id = id
self._plannable_type = plannable_type
self._plannable_id = plannable_id
self._user_id = user_id
self._workflow_state = workflow_state
self._marked_complete = marked_complete
self._dismissed = dismissed
self._created_at = created_at
self._updated_at = updated_at
self._deleted_at = deleted_at
self.logger = logging.getLogger("py3canvas.Planneroverride")
@property
def id(self):
"""The ID of the planner override."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn(
"Setting values on id will NOT update the remote Canvas instance."
)
self._id = value
@property
def plannable_type(self):
"""The type of the associated object for the planner override."""
return self._plannable_type
@plannable_type.setter
def plannable_type(self, value):
"""Setter for plannable_type property."""
self.logger.warn(
"Setting values on plannable_type will NOT update the remote Canvas instance."
)
self._plannable_type = value
@property
def plannable_id(self):
"""The id of the associated object for the planner override."""
return self._plannable_id
@plannable_id.setter
def plannable_id(self, value):
"""Setter for plannable_id property."""
self.logger.warn(
"Setting values on plannable_id will NOT update the remote Canvas instance."
)
self._plannable_id = value
@property
def user_id(self):
"""The id of the associated user for the planner override."""
return self._user_id
@user_id.setter
def user_id(self, value):
"""Setter for user_id property."""
self.logger.warn(
"Setting values on user_id will NOT update the remote Canvas instance."
)
self._user_id = value
@property
def workflow_state(self):
"""The current published state of the item, synced with the associated object."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn(
"Setting values on workflow_state will NOT update the remote Canvas instance."
)
self._workflow_state = value
@property
def marked_complete(self):
"""Controls whether or not the associated plannable item is marked complete on the planner."""
return self._marked_complete
@marked_complete.setter
def marked_complete(self, value):
"""Setter for marked_complete property."""
self.logger.warn(
"Setting values on marked_complete will NOT update the remote Canvas instance."
)
self._marked_complete = value
@property
def dismissed(self):
"""Controls whether or not the associated plannable item shows up in the opportunities list."""
return self._dismissed
@dismissed.setter
def dismissed(self, value):
"""Setter for dismissed property."""
self.logger.warn(
"Setting values on dismissed will NOT update the remote Canvas instance."
)
self._dismissed = value
@property
def created_at(self):
"""The datetime of when the planner override was created."""
return self._created_at
@created_at.setter
def created_at(self, value):
"""Setter for created_at property."""
self.logger.warn(
"Setting values on created_at will NOT update the remote Canvas instance."
)
self._created_at = value
@property
def updated_at(self):
"""The datetime of when the planner override was updated."""
return self._updated_at
@updated_at.setter
def updated_at(self, value):
"""Setter for updated_at property."""
self.logger.warn(
"Setting values on updated_at will NOT update the remote Canvas instance."
)
self._updated_at = value
@property
def deleted_at(self):
"""The datetime of when the planner override was deleted, if applicable."""
return self._deleted_at
@deleted_at.setter
def deleted_at(self, value):
"""Setter for deleted_at property."""
self.logger.warn(
"Setting values on deleted_at will NOT update the remote Canvas instance."
)
self._deleted_at = value
|
{
"content_hash": "643e5eeea5ed384cebb6278e30f96e67",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 166,
"avg_line_length": 31.324324324324323,
"alnum_prop": 0.5395234618703126,
"repo_name": "tylerclair/py3canvas",
"id": "bf06281d199e015eed52ea319b59829e1af1ccf9",
"size": "15067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3canvas/apis/planner_override.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1988347"
}
],
"symlink_target": ""
}
|
from django import forms
class AddRestaurant(forms.Form):
name = forms.CharField(label='Nombre', max_length=50)
id_restaurant = forms.CharField(label='Identificador', max_length=8)
direc = forms.CharField(label='Dirección', max_length=50)
zipcode = forms.CharField(label='Código Postal', max_length=5)
city = forms.CharField(label='Ciudad', max_length=15)
image = forms.FileField(label='Imagen')
|
{
"content_hash": "620f10e133a63a04325b87a844ab82a0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 46.77777777777778,
"alnum_prop": 0.7220902612826603,
"repo_name": "jmanday/Master",
"id": "6f7c4d5f3462392850e807534b3c47d6bb0a9378",
"size": "423",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "SSBW/Tareas/Tarea5/restaurantes/appRestaurants/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "309067"
},
{
"name": "Batchfile",
"bytes": "71697"
},
{
"name": "C",
"bytes": "3962470"
},
{
"name": "C#",
"bytes": "125762"
},
{
"name": "C++",
"bytes": "216284659"
},
{
"name": "CMake",
"bytes": "1594049"
},
{
"name": "CSS",
"bytes": "1737798"
},
{
"name": "CWeb",
"bytes": "174166"
},
{
"name": "Clojure",
"bytes": "1487"
},
{
"name": "Cuda",
"bytes": "1741779"
},
{
"name": "DIGITAL Command Language",
"bytes": "6246"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "HLSL",
"bytes": "3314"
},
{
"name": "HTML",
"bytes": "192312054"
},
{
"name": "IDL",
"bytes": "28"
},
{
"name": "Java",
"bytes": "1111092"
},
{
"name": "JavaScript",
"bytes": "1906363"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "M4",
"bytes": "29689"
},
{
"name": "Makefile",
"bytes": "8410569"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Objective-C",
"bytes": "12659"
},
{
"name": "Objective-C++",
"bytes": "211927"
},
{
"name": "PHP",
"bytes": "140802"
},
{
"name": "Pascal",
"bytes": "26079"
},
{
"name": "Perl",
"bytes": "54411"
},
{
"name": "PowerShell",
"bytes": "16406"
},
{
"name": "Python",
"bytes": "2808348"
},
{
"name": "QML",
"bytes": "593"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "R",
"bytes": "69855"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Roff",
"bytes": "5189"
},
{
"name": "Ruby",
"bytes": "9652"
},
{
"name": "Scala",
"bytes": "5683"
},
{
"name": "Shell",
"bytes": "416161"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "1096187"
},
{
"name": "XSLT",
"bytes": "553585"
},
{
"name": "Yacc",
"bytes": "19623"
}
],
"symlink_target": ""
}
|
from flask import redirect
from flask_admin import Admin, AdminIndexView, expose
from flask_admin.menu import url_for
from flask_login import current_user
from plenario.database import postgres_session
from plenario.models.SensorNetwork import FeatureMeta, NetworkMeta, NodeMeta, SensorMeta
from .admin_views import admin_views
from .views import blueprint, index
class ApiaryIndexView(AdminIndexView):
def is_accessible(self):
return current_user.is_authenticated
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('auth.login'))
@expose('/')
def index(self):
try:
return self.render('apiary/index.html', elements=index())
except KeyError:
return self.render('apiary/index.html', elements=[])
admin = Admin(
index_view=ApiaryIndexView(url='/apiary'),
name='Plenario',
template_mode='bootstrap3',
url='/apiary',
)
admin.add_view(admin_views['FOI'](FeatureMeta, postgres_session))
admin.add_view(admin_views['Sensor'](SensorMeta, postgres_session))
admin.add_view(admin_views['Network'](NetworkMeta, postgres_session))
admin.add_view(admin_views['Node'](NodeMeta, postgres_session))
apiary = admin
apiary_bp = blueprint
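# A minimal wiring sketch (an assumption, not part of this module) showing
# how the exported objects would typically be attached to an application:
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     apiary.init_app(app)            # mounts the admin UI at /apiary
#     app.register_blueprint(apiary_bp)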
|
{
"content_hash": "ffc61901bd3fe713a4987692c0b8b2cc",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 88,
"avg_line_length": 30.95,
"alnum_prop": 0.7245557350565428,
"repo_name": "UrbanCCD-UChicago/plenario",
"id": "24d285fae09f15b1478a96d1e15834ef38014adf",
"size": "1238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plenario/apiary/blueprints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22088"
},
{
"name": "Dockerfile",
"bytes": "514"
},
{
"name": "HTML",
"bytes": "100643"
},
{
"name": "JavaScript",
"bytes": "15770"
},
{
"name": "PLpgSQL",
"bytes": "594"
},
{
"name": "Python",
"bytes": "487024"
}
],
"symlink_target": ""
}
|
"""
Common Auth Middleware.
"""
from oslo.config import cfg
import webob.dec
import webob.exc
from nova import context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
auth_opts = [
cfg.BoolOpt('api_rate_limit',
default=False,
help=('Whether to use per-user rate limiting for the api. '
'This option is only used by v2 api. Rate limiting '
'is removed from v3 api.')),
cfg.StrOpt('auth_strategy',
default='noauth',
help='The strategy to use for auth: noauth or keystone.'),
cfg.BoolOpt('use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.'),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
LOG = logging.getLogger(__name__)
def _load_pipeline(loader, pipeline):
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
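    # Reverse so that the first-listed filter ends up outermost when the
    # app is wrapped below, matching paste pipeline semantics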
filters.reverse()
for filter in filters:
app = filter(app)
return app
def pipeline_factory(loader, global_conf, **local_conf):
"""A paste pipeline replica that keys off of auth_strategy."""
pipeline = local_conf[CONF.auth_strategy]
if not CONF.api_rate_limit:
limit_name = CONF.auth_strategy + '_nolimit'
pipeline = local_conf.get(limit_name, pipeline)
pipeline = pipeline.split()
# NOTE (Alex Xu): This is just for configuration file compatibility.
# If the configuration file still contains 'ratelimit_v3', just ignore it.
# We will remove this code at next release (J)
if 'ratelimit_v3' in pipeline:
LOG.warn(_('ratelimit_v3 is removed from v3 api.'))
pipeline.remove('ratelimit_v3')
return _load_pipeline(loader, pipeline)
def pipeline_factory_v3(loader, global_conf, **local_conf):
"""A paste pipeline replica that keys off of auth_strategy."""
return _load_pipeline(loader, local_conf[CONF.auth_strategy].split())
class InjectContext(wsgi.Middleware):
"""Add a 'nova.context' to WSGI environ."""
def __init__(self, context, *args, **kwargs):
self.context = context
super(InjectContext, self).__init__(*args, **kwargs)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
req.environ['nova.context'] = self.context
return self.application
class NovaKeystoneContext(wsgi.Middleware):
"""Make a request context from keystone headers."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
user_id = req.headers.get('X_USER')
user_id = req.headers.get('X_USER_ID', user_id)
if user_id is None:
LOG.debug("Neither X_USER_ID nor X_USER found in request")
return webob.exc.HTTPUnauthorized()
roles = self._get_roles(req)
if 'X_TENANT_ID' in req.headers:
# This is the new header since Keystone went to ID/Name
project_id = req.headers['X_TENANT_ID']
else:
# This is for legacy compatibility
project_id = req.headers['X_TENANT']
project_name = req.headers.get('X_TENANT_NAME')
user_name = req.headers.get('X_USER_NAME')
# Get the auth token
auth_token = req.headers.get('X_AUTH_TOKEN',
req.headers.get('X_STORAGE_TOKEN'))
# Build a context, including the auth_token...
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
service_catalog = None
if req.headers.get('X_SERVICE_CATALOG') is not None:
try:
catalog_header = req.headers.get('X_SERVICE_CATALOG')
service_catalog = jsonutils.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
_('Invalid service catalog json.'))
ctx = context.RequestContext(user_id,
project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=auth_token,
remote_address=remote_address,
service_catalog=service_catalog)
req.environ['nova.context'] = ctx
return self.application
def _get_roles(self, req):
"""Get the list of roles."""
if 'X_ROLES' in req.headers:
roles = req.headers.get('X_ROLES', '')
else:
# Fallback to deprecated role header:
roles = req.headers.get('X_ROLE', '')
if roles:
LOG.warn(_("Sourcing roles from deprecated X-Role HTTP "
"header"))
return [r.strip() for r in roles.split(',')]
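# Illustrative api-paste.ini snippet (filter and app names are assumptions,
# not taken from this file) showing how pipeline_factory selects a pipeline
# per auth_strategy, with a '_nolimit' variant used when api_rate_limit is
# off:
#
#     [composite:osapi_compute]
#     use = call:nova.api.auth:pipeline_factory
#     noauth = faultwrap noauth ratelimit osapi_compute_app_v2
#     noauth_nolimit = faultwrap noauth osapi_compute_app_v2
#     keystone = faultwrap authtoken keystonecontext ratelimit osapi_compute_app_v2
#     keystone_nolimit = faultwrap authtoken keystonecontext osapi_compute_app_v2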
|
{
"content_hash": "6b0645ed24c984cb535e8c64e4731a7c",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 35.923611111111114,
"alnum_prop": 0.583607191184999,
"repo_name": "ewindisch/nova",
"id": "f15648a77a249f8f3d08e9ccca5646285aaa41d4",
"size": "5789",
"binary": false,
"copies": "13",
"ref": "refs/heads/docker-ci",
"path": "nova/api/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13736252"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
}
|
from google.cloud.spanner import Client
from .streaming_utils import INSTANCE_NAME as STREAMING_INSTANCE
STANDARD_INSTANCE = "google-cloud-python-systest"
def scrub_instances(client):
for instance in client.list_instances():
if instance.name == STREAMING_INSTANCE:
print("Not deleting streaming instance: {}".format(STREAMING_INSTANCE))
continue
elif instance.name == STANDARD_INSTANCE:
print("Not deleting standard instance: {}".format(STANDARD_INSTANCE))
else:
print("deleting instance: {}".format(instance.name))
instance.delete()
if __name__ == "__main__":
client = Client()
scrub_instances(client)
|
{
"content_hash": "926c193a46c83bd57f90c9eab3c1ceb3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 83,
"avg_line_length": 33.523809523809526,
"alnum_prop": 0.6590909090909091,
"repo_name": "dhermes/gcloud-python",
"id": "79cd51fdfc94b1fc37650bf1032578ad2b6ead78",
"size": "1300",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "spanner/tests/system/utils/scrub_instances.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "95635"
},
{
"name": "Python",
"bytes": "2871895"
},
{
"name": "Shell",
"bytes": "4683"
}
],
"symlink_target": ""
}
|
"""Package contenant la commande 'détailler'."""
from primaires.interpreteur.commande.commande import Commande
from secondaires.navigation.visible import Visible
class CmdDetailler(Commande):
"""Commande 'détailler'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "détailler", "detail")
self.nom_categorie = "navire"
self.schema = "<point_visible>"
self.aide_courte = "affiche les détails de l'étendue d'eau"
self.aide_longue = \
"Cette commande permet à un navigateur de connaître les " \
"détails qui l'entourent. Sans paramètre, cette commande " \
"affiche les côtes, ports, navires visibles sur l'étendue. " \
"Vous pouvez préciser en paramètre un point à détailler " \
"plus particulièrement sous la forme d'une direction, comme " \
"|cmd|arrière|ff|, |cmd|bâbord|ff|, |cmd|tribord|ff| ou " \
"|cmd|avant|ff|. Vous verrez alors dans un champ plus " \
"restreint mais aussi plus détaillé."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
salle = personnage.salle
if not hasattr(salle, "navire") or salle.navire is None:
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
navire = salle.navire
etendue = navire.etendue
if salle.interieur:
personnage << "|err|Vous ne pouvez rien voir d'ici.|ff|"
return
msg = dic_masques["point_visible"].retour
if msg:
personnage << msg
else:
personnage << "Rien n'est en vue auprès de vous."
|
{
"content_hash": "a78d33c4357f16180a43d37efbd61248",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 75,
"avg_line_length": 40.116279069767444,
"alnum_prop": 0.6075362318840579,
"repo_name": "stormi/tsunami",
"id": "6d6f600482f20b804499633db1b80a924c3f21d7",
"size": "3311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/navigation/commandes/detailler/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ClusterDestroyedEvent(vim, *args, **kwargs):
'''This event records when a cluster is destroyed.'''
obj = vim.client.factory.create('ns0:ClusterDestroyedEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
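# A minimal usage sketch (an assumption, not from the original source): the
# factory is normally reached through a connected vim instance, and the
# argument values below are placeholders.
#
#     from pyvisdk.do.cluster_destroyed_event import ClusterDestroyedEvent
#     event = ClusterDestroyedEvent(vim, chainId=1, createdTime=now,
#                                   key=1, userName='admin')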
|
{
"content_hash": "6ecaa3bf6ac1cbcfc486673b75654587",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 124,
"avg_line_length": 34.72727272727273,
"alnum_prop": 0.5968586387434555,
"repo_name": "xuru/pyvisdk",
"id": "3f34a3c70c064193c1257c5cbccbcd3420553c31",
"size": "1147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/cluster_destroyed_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Location'
db.create_table('events_location', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=100)),
('street', self.gf('django.db.models.fields.CharField')(max_length=255)),
('number', self.gf('django.db.models.fields.CharField')(max_length='15')),
('district', self.gf('django.db.models.fields.CharField')(max_length='255')),
('postal_code', self.gf('django.db.models.fields.CharField')(max_length='50')),
('city', self.gf('django.db.models.fields.CharField')(max_length=50)),
('state', self.gf('django.db.models.fields.CharField')(max_length='50')),
('country', self.gf('django.db.models.fields.CharField')(max_length='50')),
('reference', self.gf('django.db.models.fields.CharField')(max_length=100)),
('map', self.gf('django.db.models.fields.URLField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal('events', ['Location'])
# Adding field 'Event.location'
db.add_column('events_event', 'location',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['events.Location']),
keep_default=False)
# Adding field 'Talk.location'
db.add_column('events_talk', 'location',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['events.Event']),
keep_default=False)
def backwards(self, orm):
# Deleting model 'Location'
db.delete_table('events_location')
# Deleting field 'Event.location'
db.delete_column('events_event', 'location_id')
# Deleting field 'Talk.location'
db.delete_column('events_talk', 'location_id')
models = {
'events.event': {
'Meta': {'object_name': 'Event'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'full_description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Location']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'events.location': {
'Meta': {'object_name': 'Location'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': "'15'"}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'events.talk': {
'Meta': {'object_name': 'Talk'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Event']"}),
'macro_theme': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '14'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'talk_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['events']
|
{
"content_hash": "6b1159437d8fefe69867747b1d184df4",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 116,
"avg_line_length": 57.60919540229885,
"alnum_prop": 0.557462090981644,
"repo_name": "pugpe/pugpe",
"id": "46a8520a072c891b3f7596c4fb8b503c8a2025a4",
"size": "5036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/events/migrations/0003_auto__add_location__add_field_event_location__add_field_talk_location.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8063"
},
{
"name": "Dockerfile",
"bytes": "80"
},
{
"name": "HTML",
"bytes": "26786"
},
{
"name": "Makefile",
"bytes": "615"
},
{
"name": "Python",
"bytes": "192729"
}
],
"symlink_target": ""
}
|
"""
chex
Indexes chess game states from one or more PGN files
(https://en.wikipedia.org/wiki/Portable_Game_Notation) with Spotify's annoy
(https://github.com/spotify/annoy) so the user can search for game states
similar to an input game state, together with the games in which those
states occur.
Requires https://pypi.python.org/pypi/python-chess,
https://github.com/spotify/annoy, and https://pypi.python.org/pypi/sqlitedict.
"""
import chess
import chess.pgn
import struct
import binascii
import argparse
import errno
import os
import sys
import time
import random
import atexit
import shutil
import copy
import tempfile
import logging
from math import sqrt
from annoy import AnnoyIndex
from sqlitedict import SqliteDict
_help_intro = """chex is a search engine for chess game states."""
def help_formatter(prog):
""" So formatter_class's max_help_position can be changed. """
return argparse.HelpFormatter(prog, max_help_position=40)
# For bitboard conversion
_offsets = {
'p' : 0,
'P' : 1,
'n' : 2,
'N' : 3,
'b' : 4,
'B' : 5,
'k' : 6,
'K' : 7,
'r' : 8,
'R' : 9,
'q' : 10,
'Q' : 11,
}
_reverse_offsets = { value : key for key, value in _offsets.items() }
_reverse_colors_offsets = {
'p' : 1,
'P' : 0,
'n' : 3,
'N' : 2,
'b' : 5,
'B' : 4,
'k' : 7,
'K' : 6,
'r' : 9,
'R' : 8,
'q' : 11,
'Q' : 10,
}
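# 64 squares x 12 piece planes (one per piece type and color) = 768 bits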
_bitboard_length = 768
def board_to_bitboard(board):
""" Converts chess module's board to bitboard game state representation.
        board: object of type chess.Board
Return value: binary vector of length _bitboard_length as Python list
"""
bitboard = [0 for _ in xrange(_bitboard_length)]
for i in xrange(64):
try:
bitboard[i*12 + _offsets[board.piece_at(i).symbol()]] = 1
except AttributeError:
pass
return bitboard
def bitboard_to_board(bitboard):
""" Converts bitboard to board.
TODO: unit test.
bitboard: iterable of _bitboard_length 1s and 0s
Return value: chess.Board representation of bitboard
"""
fen = [[] for _ in xrange(8)]
for i in xrange(8):
streak = 0
for j in xrange(8):
segment = 12*(8*i + j)
piece = None
for offset in xrange(12):
if bitboard[segment + offset]:
piece = _reverse_offsets[offset]
if piece is not None:
if streak: fen[i].append(str(streak))
fen[i].append(piece)
streak = 0
else:
streak += 1
if j == 7: fen[i].append(str(streak))
return chess.Board(
'/'.join([''.join(row) for row in fen][::-1]) + ' w KQkq - 0 1'
)
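# Sketch (an author assumption, not in the original tests): the conversion
# should round-trip on piece placement, e.g.
#
#     start = chess.Board()
#     assert bitboard_to_board(board_to_bitboard(start)).board_fen() \
#         == start.board_fen()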
def bitboard_to_key(bitboard):
""" Converts bitboard to ASCII representation used as key in SQL database.
bitboard: bitboard representation of chess board
Return value: ASCII representation of bitboard
"""
to_unhexlify = '%x' % int(''.join(map(str, map(int, bitboard))), 2)
try:
return binascii.unhexlify(to_unhexlify)
except TypeError:
return binascii.unhexlify('0' + to_unhexlify)
def key_to_bitboard(key):
""" Converts ASCII representation of board to bitboard.
key: ASCII representation of bitboard
Return value: bitboard (binary list)
"""
unpadded = [
int(digit) for digit in bin(int(binascii.hexlify(key), 16))[2:]]
return [0 for _ in xrange(_bitboard_length - len(unpadded))] + unpadded
def invert_board(board):
""" Computes bitboard of given position but with inverted colors. """
inversevector = [0 for _ in xrange(_bitboard_length)]
for i in xrange(64):
try:
inversevector[i * 12
+ _reverse_colors_offsets[board.piece_at(i).symbol()]] = 1
except AttributeError:
pass
return inversevector
def flip_board(board):
""" Computes bitboard of the mirror image of a given position. """
flipvector = [0 for _ in xrange(_bitboard_length)]
for i in range(8):
for j in range(8):
try:
flipvector[12*(8*i + 7 - j)
+ _offsets[board.piece_at(8*i + j).symbol()]] = 1
except AttributeError:
pass
return flipvector
def reverse_and_flip(board):
""" Computes bitboard after flipping position and reversing colors.
board: object of type chess.Board
Return value: flipped bitboard
"""
reversevector = [0 for _ in xrange(_bitboard_length)]
for i in range(8):
for j in range(8):
try:
reversevector[12*(8*i + 7 - j)
+ _reverse_colors_offsets[
board.piece_at(8*i + j).symbol()]] = 1
except AttributeError:
pass
return reversevector
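# The four transforms above (identity, color inversion, horizontal mirror,
# and mirror with inverted colors) describe strategically equivalent
# positions; add_game keys on the minimum of the four so that equivalent
# boards collide in the index.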
class ChexIndex(object):
""" Manages game states from Annoy Index and SQL database. """
def __init__(self, chex_index, id_label='FICSGamesDBGameNo',
first_indexed_move=10, n_trees=200, seed=1,
scratch=None, learning_rate=1, min_iterations=100,
max_iterations=5000000, difference=.1):
""" Number of dimensions is always 8 x 8 x 12; there are 6 black piece
types, six white piece types, and the board is 8 x 8."""
self.annoy_index = AnnoyIndex(_bitboard_length, metric='angular')
self.id_label = id_label
self.first_indexed_move = first_indexed_move
self.chex_index = chex_index
try:
os.makedirs(self.chex_index)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Create temporary directory
if scratch is not None:
try:
                os.makedirs(scratch)
except OSError as e:
if e.errno != errno.EEXIST:
raise
self.scratch = tempfile.mkdtemp(dir=scratch)
# Schedule temporary directory for deletion
atexit.register(shutil.rmtree, self.scratch, ignore_errors=True)
self.chex_sql = SqliteDict(
os.path.join(self.chex_index, 'sqlite.idx'))
self.game_sql = SqliteDict(
os.path.join(self.scratch, 'temp.idx')
)
self.game_number = 0
self.n_trees = n_trees
# For reproducibly randomly drawing boards
self.seed = seed
self.learning_rate = learning_rate
self.min_iterations = min_iterations
self.max_iterations = max_iterations
self.difference = difference
self.weights = [1. for _ in xrange(_bitboard_length)]
def add_game(self, node):
""" Adds game parsed by chess library to chex SQL database.
node: game object of type chess.pgn.Game
Return value: 0 if game added successfully, else 1
"""
if node is None:
return 1
game_id = node.headers[self.id_label]
move_number = 0
for move_number in xrange(self.first_indexed_move - 1):
try:
node = node.variations[0]
except IndexError:
# Too few moves to index
return 0
while True:
move_number += 1
bitboard = board_to_bitboard(node.board())
inversevector = invert_board(node.board())
flipvector = flip_board(node.board())
reversevector = reverse_and_flip(node.board())
# Store as ASCII; use minimum of strategically equivalent boards
# See https://github.com/samirsen/chex/issues/1 for details
key = min(map(bitboard_to_key,
[bitboard, inversevector, flipvector, reversevector]))
            if key in self.chex_sql:
self.chex_sql[key] = self.chex_sql[key] + [
(game_id, move_number)
]
else:
self.chex_sql[key] = [(game_id, move_number)]
if self.game_number in self.game_sql:
self.game_sql[self.game_number] = self.game_sql[
self.game_number] + [key]
else:
self.game_sql[self.game_number] = [key]
if node.is_end(): break
node = node.variations[0]
self.game_number += 1
return 0
def _mahalanobis_loss(self,
reference_bitboard, plus_bitboard, minus_bitboard):
""" Computes value of loss function for finding Mahalanobis metric.
reference_bitboard, plus_bitboard, minus_bitboard: explained
in algo
Return value: value of loss function
"""
return max(0.,
1. + sum([minus_bitboard[i]
* reference_bitboard[i] * self.weights[i]
for i in xrange(_bitboard_length)])
- sum([plus_bitboard[i]
* reference_bitboard[i] * self.weights[i]
for i in xrange(_bitboard_length)]))
def _mahalanobis(self):
""" Computes sparse Mahalanobis metric using algorithm from paper.
The reference is SOML: Sparse online metric learning with
application to image retrieval by Gao et al. We implement
their algorithm 1: SOML-TG (sparse online metric learning via
truncated gradient). We set lambda = 0 and use no
sparsity-promoting regularization term.
Return value: diagonal of Mahalanobis metric
"""
# Finalize game SQL database for querying
self.game_sql.commit()
# For reproducible random draws from database
random.seed(self.seed)
last_weights = [0 for _ in xrange(_bitboard_length)]
iteration, critical_iteration = 0, self.min_iterations
while True:
# Draw game
game_index = random.randint(0, self.game_number - 1)
            # Draw three distinct positions (with their move indices) from
            # the game without replacement
try:
[reference_bitboard,
plus_bitboard, minus_bitboard] = random.sample(
list(
enumerate(
map(key_to_bitboard, self.game_sql[game_index])
)
), 3
)
except ValueError:
# Not enough moves in game to index
continue
reference_bitboard, plus_bitboard, minus_bitboard = (
list(reference_bitboard), list(plus_bitboard),
list(minus_bitboard)
)
for bitboard in reference_bitboard, plus_bitboard, minus_bitboard:
norm_constant = 1. / sqrt(sum(bitboard[1]))
bitboard[1] = [component * norm_constant
for component in bitboard[1]]
if abs(minus_bitboard[0] - reference_bitboard[0]) < abs(
plus_bitboard[0] - reference_bitboard[0]):
minus_bitboard, plus_bitboard = plus_bitboard, minus_bitboard
if self._mahalanobis_loss(reference_bitboard[1],
plus_bitboard[1], minus_bitboard[1]
) > 0:
v = [self.weights[i] - self.learning_rate
* reference_bitboard[1][i]
* (plus_bitboard[1][i] - minus_bitboard[1][i])
for i in xrange(_bitboard_length)]
self.weights = [max(0, v[j]) if v[j] >= 0 else min(0, v[j])
for j in xrange(_bitboard_length)]
iteration += 1
if iteration >= critical_iteration:
print critical_iteration
print self.weights
if sqrt(sum([(last_weights[i] - self.weights[i])**2
for i in xrange(_bitboard_length)])) <= (
self.difference):
# Must sqrt so angular distance in annoy works
self.weights = [sqrt(weight) for weight in self.weights]
break
last_weights = copy.copy(self.weights)
critical_iteration *= 2
if iteration >= self.max_iterations:
# Must sqrt so angular distance in annoy works
print self.weights
self.weights = [sqrt(weight) for weight in self.weights]
break
def _annoy_index(self):
""" Adds all boards from chex SQL database to Annoy index
No return value.
"""
for i, key in enumerate(self.chex_sql):
bitboard = key_to_bitboard(key)
self.annoy_index.add_item(i, [self.weights[j] * bitboard[j]
for j in xrange(_bitboard_length)])
def save(self):
# Compute Mahalanobis matrix
self._mahalanobis()
# Create annoy index
self._annoy_index()
self.annoy_index.build(self.n_trees)
# Save all index files
        self.annoy_index.save(
os.path.join(self.chex_index, 'annoy.idx')
)
self.chex_sql.commit()
self.chex_sql.close()
self.game_sql.close()
# Clean up
shutil.rmtree(self.scratch, ignore_errors=True)
class ChexSearch(object):
""" Searches Chex index for game states and associated games. """
#TODO: Combine results of board transforms with binary search algo.
def __init__(self, chex_index, results=10, search_k=40):
self.chex_index = chex_index
self.results = results
self.search_k = search_k
self.annoy_index = AnnoyIndex(_bitboard_length, metric='angular')
self.annoy_index.load(os.path.join(self.chex_index, 'annoy.idx'))
self.chex_sql = SqliteDict(
os.path.join(self.chex_index, 'sqlite.idx'))
def search(self, board):
""" Searches for board.
board: game object of type chess.Board
Return value: [
(board, similarity score, [(game_id, move number), ...]), ...]
"""
symmetrical_boards = [board_to_bitboard(board),
invert_board(board),
flip_board(board),
reverse_and_flip(board)]
results = []
for bitboard in symmetrical_boards:
for annoy_id, similarity in zip(
*self.annoy_index.get_nns_by_vector(
                        bitboard, self.results, search_k=self.search_k,
                        include_distances=True
)):
                # Recompute ASCII key; indexed vectors are weighted copies
                # of the binary bitboards, so recover the bits by
                # thresholding (this assumes the learned weights are
                # positive wherever a piece can appear)
                item_vector = self.annoy_index.get_item_vector(annoy_id)
                match_bitboard = [1 if component > 0 else 0
                                  for component in item_vector]
                key = bitboard_to_key(match_bitboard)
                results.append((bitboard_to_board(match_bitboard), similarity,
                    self.chex_sql[key]))
return results
def close(self):
del self.annoy_index
if __name__ == '__main__':
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=_help_intro,
formatter_class=help_formatter)
subparsers = parser.add_subparsers(help=(
'subcommands; add "-h" or "--help" '
'after a subcommand for its parameters'),
dest='subparser_name'
)
index_parser = subparsers.add_parser(
'index',
help='creates index of chess game states'
)
search_parser = subparsers.add_parser(
'search',
help=('searches for chess game states similar to '
'those input by user')
)
index_parser.add_argument('-f', '--first-indexed-move',
metavar='<int>', type=int, required=False,
default=10,
help=('indexes only those game states at least this many moves '
'into a given game')
)
index_parser.add_argument('-p', '--pgns', metavar='<files>', nargs='+',
required=True, type=str,
help='space-separated list of PGNs to index'
)
index_parser.add_argument('-i', '--id-label', metavar='<str>',
required=False, type=str,
default='FICSGamesDBGameNo',
help='game ID label from metadata in PGN files'
)
index_parser.add_argument('-x', '--chex-index', metavar='<dir>',
required=True, type=str,
help='directory in which to store chex index files'
)
# Test various values!
index_parser.add_argument('--n-trees', metavar='<int>', type=int,
required=False,
default=200,
help='number of annoy trees'
)
index_parser.add_argument('--scratch', metavar='<dir>', type=str,
required=False,
default=None,
help=('where to store temporary files; default is securely '
'created directory in $TMPDIR or similar'))
index_parser.add_argument('--learning-rate', metavar='<dec>', type=float,
required=False,
default=1,
help='learning rate for Mahalanobis metric')
index_parser.add_argument('--min-iterations', metavar='<int>', type=int,
required=False,
default=100,
help='minimum number of iterations for learning Mahalanobis metric'
)
index_parser.add_argument('--max-iterations', metavar='<int>', type=int,
required=False,
        default=5000000,
help='maximum number of iterations for learning Mahalanobis metric'
)
index_parser.add_argument('--difference', metavar='<dec>', type=float,
required=False,
default=.1,
help=('maximum Euclidean distance between Mahalanobis matrices '
'for deciding convergence')
)
search_parser.add_argument('-f', '--board-fen', metavar='<file>',
required=True, type=str,
help='first field of FEN describing board to search for')
search_parser.add_argument('-x', '--chex-index', metavar='<dir>',
required=True, type=str,
help='chex index directory'
)
# Test various values!
search_parser.add_argument('--search-k', metavar='<int>',
required=False, type=int,
default=-1,
help='annoy search-k; default is results * n_trees'
)
search_parser.add_argument('--results', metavar='<int>',
required=False, type=int,
default=10,
help='maximum number of returned game states'
)
parser.add_argument('--verbose', action='store_const', const=True,
default=False,
help='be talkative'
)
args = parser.parse_args()
# Configure this a little later
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO,
format='%(asctime)s %(levelname)-10s %(message)s',
datefmt='%m-%d-%Y %H:%M:%S')
console = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
if args.subparser_name == 'index':
index = ChexIndex(args.chex_index, id_label=args.id_label,
first_indexed_move=args.first_indexed_move,
n_trees=args.n_trees, scratch=args.scratch,
learning_rate=args.learning_rate,
min_iterations=args.min_iterations,
max_iterations=args.max_iterations,
difference=args.difference)
for pgn in args.pgns:
game_count = 0
with open(pgn) as pgn_stream:
while True:
if index.add_game(chess.pgn.read_game(pgn_stream)):
break
game_count += 1
print 'Read {} games...\r'.format(game_count),
sys.stdout.flush()
# TODO: clean up display of this
print 'Read {} games.'.format(game_count)
index.save()
else:
assert args.subparser_name == 'search'
searcher = ChexSearch(args.chex_index,
results=args.results, search_k=args.search_k)
# Pretty print results
print '\t'.join(
['rank', 'board FEN', 'similarity score', 'games',
'move numbers']
)
for (rank, (board, similarity, games)) in enumerate(searcher.search(
chess.Board(args.board_fen + ' w KQkq - 0 1')
)):
games = zip(*games)
print '\t'.join([
str(rank + 1), board.board_fen(), str(similarity),
','.join(games[0]), ','.join(map(str, games[1]))
])
# Close may avoid shutdown exception for unknown reason
searcher.close()
|
{
"content_hash": "a7d091ae1b08fee7ddf5100d0533fc15",
"timestamp": "",
"source": "github",
"line_count": 577,
"max_line_length": 79,
"avg_line_length": 38.06412478336222,
"alnum_prop": 0.5332149524199791,
"repo_name": "samirsen/chex",
"id": "786f5dd17b4bbd14256184d04985cba1db4b1c60",
"size": "21985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21985"
}
],
"symlink_target": ""
}
|
import serial
import database
import protocol
import time
import power_indication
class SkladInterface:
def __init__(self):
#self.serial = serial.Serial(0)
#self.serial = serial.serial_for_url('loop://', timeout=1)
self.serial = serial.Serial("/dev/ttyAMA0", baudrate=115200, timeout=1)
self.protocol = protocol.protocol_t(self.serial.write)
distances = self.readConf("distances.conf.local")
self.locked = 0
self.start_pos = distances[1]
self.end_pos = distances[2]
self.tray_count = distances[0]
self.writePower()
self.read()
time.sleep(4)
self.writeInit()
time.sleep(6)
self.writeMonitorPower()
self.read()
def readConf(self, filename):
distances = []
fp = open(filename, "r");
for line in fp:
if line[0] is not "#":
for s in line.split():
if s.isdigit():
distances.append(int(s))
fp.close()
return distances
def writeMonitorPower(self):
self.protocol.send(0x07, [0x01], self.responseSended)
self.locked = 1
def writeMove(self, pos):
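        # Map a tray index onto an absolute stepper position by linear
        # interpolation between the configured start and end positions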
pos = self.start_pos + ((self.end_pos-self.start_pos)/(self.tray_count-1))*pos
self.protocol.send(0x02, [pos & 0xFF, (pos>>8) & 0xFF], self.responseSended)
self.locked = 1
def writePull(self, len):
self.protocol.send(0x03, [len & 0xFF, (len>>8) & 0xFF], self.responseSended)
self.locked = 1
def writeCut(self):
self.protocol.send(0x04, [], self.responseSended)
self.locked = 1
def writeInit(self):
self.protocol.send(0x05, [], self.responseSended)
self.locked = 1
def writePower(self):
self.protocol.send(0x06, [0x01], self.responseSended)
self.locked = 1
def responseSended(self, packetNum, argv):
self.locked = 0
def read(self):
time.sleep(0.1)
data = self.serial.read()
self.protocol.parse(data)
class Daemon:
def __init__(self):
self.db = database.Database()
self.sklad = SkladInterface()
self.power_indication = power_indication.PowerIndication()
print self.db.getFirstCommand()
def loop(self):
if self.power_indication.loop():
self.uninitialize()
self.power_indication.powerOff()
self.sklad.read()
print "Loop"
command = self.db.getFirstCommand()
if command:
print "Novy prikaz"
result = self.writeCommand(command)
if result == 0:
self.db.moveCommand(command[0], -1)
else:
print "Sklad error: %d" % (result)
def writeCommand(self, command):
print "writing move %d" % (command[5])
self.sklad.writeMove(command[5])
time.sleep(1)
print "writing pull"
self.sklad.writePull(command[6])
time.sleep(0.5*command[6])
print "writing cut"
self.sklad.writeCut()
time.sleep(0.5)
print "finished writing command"
return 0
def uninitialize(self):
        pass
daemon = Daemon()
while True:
daemon.loop()
|
{
"content_hash": "7f8a32b1492a9f7ede75f8a2c3cc34d7",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 86,
"avg_line_length": 33.01020408163265,
"alnum_prop": 0.5758887171561051,
"repo_name": "honya121/sklad-daemon",
"id": "4573d189d92e48d3ad0f7b0be16c51ecc7bdd6a7",
"size": "3235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11109"
}
],
"symlink_target": ""
}
|