text stringlengths 0 1.05M | meta dict |
|---|---|
# all country info
from __future__ import unicode_literals
import os, json, webnotes
def get_country_info(country=None):
	"""Return a webnotes._dict of info for *country* from country_info.json.

	Unknown countries yield an empty dict; a ``date_format`` key is always
	guaranteed (default ``"dd-mm-yyyy"``) so callers need not check for it.
	"""
	data = get_all()
	data = webnotes._dict(data.get(country, {}))
	# idiomatic membership test; ensure a usable default date format
	if 'date_format' not in data:
		data.date_format = "dd-mm-yyyy"
	return data
def get_all():
	"""Load and return the full country_info.json located next to this module."""
	with open(os.path.join(os.path.dirname(__file__), "country_info.json"), "r") as local_info:
		# json.load reads straight from the file object -- no intermediate string
		return json.load(local_info)
@webnotes.whitelist()
def get_country_timezone_info():
	"""Return info for all countries together with every pytz timezone name."""
	import pytz
	info = {}
	info["country_info"] = get_all()
	info["all_timezones"] = pytz.all_timezones
	return info
def update():
	"""Merge currency display formats from currency_info.json into
	country_info.json and rewrite country_info.json in place.

	Utility/maintenance function; prints the merged data for inspection.
	"""
	# use a distinct name for the file handle instead of shadowing it with the data
	with open(os.path.join(os.path.dirname(__file__), "currency_info.json"), "r") as currency_file:
		nformats = json.load(currency_file)
	all_data = get_all()
	for country in all_data:
		data = all_data[country]
		# look up the format by the country's currency, falling back to "default"
		data["number_format"] = nformats.get(data.get("currency", "default"),
			nformats.get("default"))["display"]
	# print() works on both Python 2 and 3 (the bare print statement is Py2-only)
	print(all_data)
	with open(os.path.join(os.path.dirname(__file__), "country_info.json"), "w") as local_info:
		local_info.write(json.dumps(all_data, indent=1))
| {
"repo_name": "rohitw1991/adbwnf",
"path": "webnotes/country_info.py",
"copies": "6",
"size": "1092",
"license": "mit",
"hash": 7882626708285224000,
"line_mean": 25.6341463415,
"line_max": 92,
"alpha_frac": 0.6749084249,
"autogenerated": false,
"ratio": 2.8511749347258486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6526083359625849,
"avg_score": null,
"num_lines": null
} |
__all__ = ["Crawler", "CrawlerCollection", "NPCore"]
from random import choice as random_choice, uniform as random_float
from threading import Thread
from types import MethodType
from typing import Callable, List, Optional, Set, Union
from weakref import ReferenceType, WeakMethod
from nichtparasoup.core.image import Image, ImageCollection, ImageUri
from nichtparasoup.core.imagecrawler import BaseImageCrawler
_CrawlerWeight = Union[int, float]  # constraint: > 0
# Set of image URIs that must not be (re)added once seen.
class _Blacklist(Set[ImageUri]):
    pass
# Callback signatures used by Crawler hooks:
_IsImageAddable = Callable[[Image], bool]  # decide whether a crawled image may be added
_OnImageAdded = Callable[[Image], None]  # notification fired after an image was added
_OnFill = Callable[["Crawler", int], None]  # notification fired after each fill/crawl step
# Default pause (seconds) between crawl attempts while filling up.
_FILLUP_TIMEOUT_DEFAULT = 1.0
class Crawler(object):
    """Couple an imagecrawler with a weight, an image store and two hooks.

    The hooks are held as weak references (WeakMethod) so a Crawler does not
    keep the hooks' owner object alive.
    """
    def __init__(self, imagecrawler: BaseImageCrawler, weight: _CrawlerWeight,
                 is_image_addable: Optional[_IsImageAddable] = None,
                 on_image_added: Optional[_OnImageAdded] = None) -> None:  # pragma: no cover
        """:raises ValueError: if ``weight`` is not positive."""
        if weight <= 0:
            raise ValueError('weight <= 0')
        self.imagecrawler = imagecrawler
        self.weight = weight
        self.images = ImageCollection()
        # weak references to the hook callables; None means "no hook set"
        self._is_image_addable_wr = None  # type: Optional[ReferenceType[_IsImageAddable]]
        self._image_added_wr = None  # type: Optional[ReferenceType[_OnImageAdded]]
        self.set_is_image_addable(is_image_addable)
        self.set_image_added(on_image_added)
    def set_is_image_addable(self, is_image_addable: Optional[_IsImageAddable]) -> None:
        """Store the "may this image be added?" hook as a weak reference.

        Only bound methods (MethodType) or None are accepted; anything else
        raises.
        """
        t_is_image_addable = type(is_image_addable)
        if None is is_image_addable:
            self._is_image_addable_wr = None
        elif MethodType is t_is_image_addable:
            self._is_image_addable_wr = WeakMethod(is_image_addable)  # type: ignore
        else:
            raise Exception('type {} not supported, yet'.format(t_is_image_addable))
        # TODO: add function and other types - and write proper tests for it
    def get_is_image_addable(self) -> Optional[_IsImageAddable]:
        """Return the hook callable if it is still alive, else None."""
        return self._is_image_addable_wr() if self._is_image_addable_wr else None
    def set_image_added(self, image_added: Optional[_OnImageAdded]) -> None:
        """Store the "image was added" hook as a weak reference.

        Only bound methods (MethodType) or None are accepted; anything else
        raises.
        """
        t_image_added = type(image_added)
        if None is image_added:
            self._image_added_wr = None
        elif MethodType is t_image_added:
            self._image_added_wr = WeakMethod(image_added)  # type: ignore
        else:
            raise Exception('type {} not supported, yet'.format(t_image_added))
        # TODO: add function and other types - and write proper tests for it
    def get_image_added(self) -> Optional[_OnImageAdded]:
        """Return the hook callable if it is still alive, else None."""
        return self._image_added_wr() if self._image_added_wr else None
    def reset(self) -> None:
        """Drop all collected images and reset the underlying imagecrawler."""
        self.images.clear()
        self.imagecrawler.reset()
    def crawl(self) -> int:
        """Crawl once; keep addable images and fire the added-hook for each.

        Returns the number of images the imagecrawler produced -- including
        ones that were filtered out by the addable-hook.
        """
        is_image_addable = self.get_is_image_addable()
        image_added = self.get_image_added()
        images_crawled = self.imagecrawler.crawl()
        for image_crawled in images_crawled:
            # with no hook set, every image counts as addable
            addable = is_image_addable(image_crawled) if is_image_addable else True
            if not addable:
                continue  # for
            self.images.add(image_crawled)
            if image_added:
                image_added(image_crawled)
        return len(images_crawled)
    def fill_up_to(self, to: int,
                   filled_by: Optional[_OnFill] = None,
                   timeout: float = _FILLUP_TIMEOUT_DEFAULT) -> None:
        """Crawl repeatedly until at least ``to`` images are stored.

        ``filled_by`` is called after each crawl with the refill count; the
        loop stops early when a crawl yields nothing.  ``timeout`` is a pause
        (seconds) between crawls.
        """
        from time import sleep
        while len(self.images) < to:
            refilled = self.crawl()
            if filled_by:
                filled_by(self, refilled)
            if 0 == refilled:
                break  # while
            if len(self.images) < to and timeout > 0:
                # be nice, give the site some rest after crawling
                sleep(timeout)
    def get_random_image(self) -> Optional[Image]:
        """Return a uniformly random stored image, or None if none are stored."""
        if not self.images:
            return None
        image = random_choice(list(self.images))
        return image
    def pop_random_image(self) -> Optional[Image]:
        """Like get_random_image(), but also remove the image from the store."""
        image = self.get_random_image()
        if image:
            self.images.discard(image)
        return image
class CrawlerCollection(List[Crawler]):
    """A list of crawlers supporting weighted random selection."""

    def _random_weight(self) -> _CrawlerWeight:
        """Draw a uniform value between 0 and the total weight of all crawlers."""
        total = sum(crawler.weight for crawler in self)
        return random_float(0, total)

    def get_random(self) -> Optional[Crawler]:
        """Pick a crawler at random, each weighted by its ``weight`` attribute."""
        goal = self._random_weight()
        # IDEA: a goal of 0 is an edge case and could be handled if needed ...
        running = 0  # type: _CrawlerWeight
        for candidate in self:
            running += candidate.weight
            if running >= goal:
                return candidate
        return None
class NPCore(object):
    """Core object: owns the crawler collection and a blacklist of seen URIs."""

    def __init__(self) -> None:  # pragma: no cover
        self.crawlers = CrawlerCollection()
        self.blacklist = _Blacklist()

    def _is_image_not_in_blacklist(self, image: Image) -> bool:
        # must be compatible to: _IsImageAddable
        return image.uri not in self.blacklist

    def _add_image_to_blacklist(self, image: Image) -> None:
        # must be compatible to: _OnImageAdded
        if not image.is_generic:
            self.blacklist.add(image.uri)

    def has_imagecrawler(self, imagecrawler: BaseImageCrawler) -> bool:
        """Tell whether an equal imagecrawler is already registered."""
        return imagecrawler in (registered.imagecrawler for registered in self.crawlers)

    def add_imagecrawler(self, imagecrawler: BaseImageCrawler, weight: _CrawlerWeight) -> None:
        """Wrap *imagecrawler* in a Crawler wired to the shared blacklist."""
        new_crawler = Crawler(
            imagecrawler, weight,
            self._is_image_not_in_blacklist, self._add_image_to_blacklist
        )
        self.crawlers.append(new_crawler)

    def fill_up_to(self, to: int, on_refill: Optional[_OnFill], timeout: float = _FILLUP_TIMEOUT_DEFAULT) -> None:
        """Fill every crawler up to ``to`` images, one daemon thread each; join all."""
        workers = []  # type: List[Thread]
        for crawler in self.crawlers:
            worker = Thread(target=crawler.fill_up_to, args=(to, on_refill, timeout), daemon=True)
            workers.append(worker)
            worker.start()
        for worker in workers:
            worker.join()

    def reset(self) -> int:
        """Reset all crawlers and clear the blacklist.

        Returns the number of blacklist entries that were forgotten.
        """
        workers = []  # type: List[Thread]
        for crawler in self.crawlers.copy():
            worker = Thread(target=crawler.reset, daemon=True)
            workers.append(worker)
            worker.start()
        forgotten = len(self.blacklist)
        self.blacklist.clear()
        for worker in workers:
            worker.join()
        return forgotten
| {
"repo_name": "k4cg/nichtparasoup",
"path": "nichtparasoup/core/__init__.py",
"copies": "1",
"size": "6554",
"license": "mit",
"hash": -4359380360016446500,
"line_mean": 37.1046511628,
"line_max": 114,
"alpha_frac": 0.6229783338,
"autogenerated": false,
"ratio": 3.671708683473389,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4794687017273389,
"avg_score": null,
"num_lines": null
} |
__all__ = ['crc16', 'get_hexdigest', 'luhn']
def crc16(buff, crc=0, poly=0xa001):
    """Compute a reflected CRC-16 (CRC-16/ARC family) over *buff*.

    buff: a string whose characters are processed byte-wise via ord().
    crc:  initial register value (0 gives plain CRC-16/ARC).
    poly: reflected generator polynomial (0xa001 is 0x8005 bit-reversed).
    Returns the 16-bit checksum as an int.
    """
    for char in buff:
        ch = ord(char)
        # shift the byte through the register one bit at a time, LSB first
        for _ in range(8):
            if (crc & 1) ^ (ch & 1):
                crc = (crc >> 1) ^ poly
            else:
                crc >>= 1
            ch >>= 1
    return crc
def get_hexdigest(algorithm, salt, s):
    """Return the hex digest of ``salt + s`` under *algorithm*.

    Supported algorithms: 'md5', 'sha1' and 'crypt' (where available).
    Raises ValueError for an unknown algorithm or when crypt is missing.
    """
    import sys
    # Pick digest constructors: hashlib on Python >= 2.5, legacy modules before.
    if sys.version_info >= (2, 5):
        import hashlib
        md5_constructor = hashlib.md5
        md5_hmac = md5_constructor
        sha_constructor = hashlib.sha1
        sha_hmac = sha_constructor
    else:
        import md5
        md5_constructor = md5.new
        md5_hmac = md5
        import sha
        sha_constructor = sha.new
        sha_hmac = sha
    if algorithm == 'crypt':
        try:
            import crypt
        except ImportError:
            raise ValueError('"crypt" algorithm not supported in this '
                             'environment')
        return crypt.crypt(s, salt)
    # Hash algorithms digest the concatenation of salt and payload.
    digesters = {'md5': md5_constructor, 'sha1': sha_constructor}
    if algorithm in digesters:
        return digesters[algorithm](salt + s).hexdigest()
    raise ValueError("Got unknown algorithm type.")
LUHN_ODD_LOOKUP = (0, 2, 4, 6, 8, 1, 3, 5, 7, 9)  # sum_of_digits(index * 2)
def luhn(candidate):
    """ Checks a candidate number for validity according to the Luhn
    algorithm (used in validation of, for example, credit cards).
    Both numeric and string candidates are accepted.
    """
    # `basestring` only exists on Python 2; fall back to `str` on Python 3
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if not isinstance(candidate, string_types):
        candidate = str(candidate)
    try:
        # evens: digits at even positions from the right (weight 1);
        # odds: digits at odd positions, doubled and digit-summed via lookup.
        evens = sum(int(c) for c in candidate[-1::-2])
        odds = sum(LUHN_ODD_LOOKUP[int(c)] for c in candidate[-2::-2])
        return ((evens + odds) % 10 == 0)
    except ValueError:  # Raised if an int conversion fails
        return False
| {
"repo_name": "sprymak/metacorus-django-utils",
"path": "mcutils/checksums.py",
"copies": "1",
"size": "2036",
"license": "mit",
"hash": 760560266945768000,
"line_mean": 28.0857142857,
"line_max": 76,
"alpha_frac": 0.5510805501,
"autogenerated": false,
"ratio": 3.7495395948434624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4800620144943462,
"avg_score": null,
"num_lines": null
} |
__all__ = ["createm3ulib"]
import os
from os import path
class createm3ulib:
    """Walk a music library tree and write one .m3u playlist per parent
    directory of media files (one playlist per album folder)."""

    def __init__(self, basedir, dryrun):
        """basedir: root of the library; dryrun: if True, only print actions.

        :raises RuntimeError: if *basedir* does not exist.
        """
        if not path.exists(basedir):
            # RuntimeError subclasses Exception, so existing handlers still match
            raise RuntimeError('Base Directory does not exist')
        self.basedir = basedir
        self.dryrun = dryrun
        # file names / extensions that must never end up in a playlist
        self.file_exclude = ['.DS_Store', '.m3u', '.jpg', '.zip']

    def do(self):
        """Scan the tree; flush an .m3u each time a new parent directory starts."""
        current_dir = ''
        found_files = []
        for subdir, dirs, files in os.walk(self.basedir):
            for file in files:
                if subdir == self.basedir:
                    continue  # skip files directly under the library root
                parent_dir = self.get_parentdir(subdir)
                if len(found_files) == 0 and parent_dir != current_dir:
                    current_dir = parent_dir
                if current_dir != parent_dir:
                    # crossed into a new album: flush the collected files
                    self.save_m3u(current_dir, found_files)
                    found_files = []
                    current_dir = parent_dir
                filepath = subdir + os.sep + file
                file_ext = self.get_extension(filepath)
                if file_ext not in self.file_exclude and file not in self.file_exclude:
                    found_files.append(filepath)
        if len(found_files) > 0:
            # flush whatever remained after the walk finished
            self.save_m3u(current_dir, found_files)

    def get_parentdir(self, directory):
        """Return *directory* without its last '/'-separated component."""
        return '/'.join(directory.split('/')[0:-1])

    def get_curdirectory_name(self, directory):
        """Return the last '/'-separated component of *directory*."""
        return directory.split('/')[-1]

    def save_m3u(self, basedir, files):
        """Write *files* (made relative to *basedir*) to <basedir>/<name>.m3u."""
        cur_directory_name = self.get_curdirectory_name(basedir)
        # NOTE(review): replacing the prefix with "\n" newline-separates the
        # entries but also produces a leading blank line -- confirm intended.
        relative_files = list(map(lambda x: x.replace(basedir + "/", "\n"), files))
        m3u_file = f'{basedir}/{cur_directory_name}.m3u'
        print(f'Saving M3U file in: {m3u_file}')
        if not self.dryrun:
            # context manager guarantees the handle is closed even on error
            with open(m3u_file, 'w') as playlist:
                playlist.writelines(relative_files)

    def get_extension(self, filename):
        """Return the extension (with leading dot) of *filename*, '' if none."""
        filename, file_extension = os.path.splitext(filename)
        return file_extension
"repo_name": "TheDarkTrumpet/MusicOrganization",
"path": "createm3u/createm3ulib/createm3ulib.py",
"copies": "1",
"size": "2042",
"license": "mit",
"hash": 1028688965109731500,
"line_mean": 35.4821428571,
"line_max": 88,
"alpha_frac": 0.5519098923,
"autogenerated": false,
"ratio": 3.838345864661654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9875944055690136,
"avg_score": 0.0028623402543036082,
"num_lines": 56
} |
__all__ = ["create_parser"]
from argparse import ArgumentParser
from typing import Any, Set
from argcomplete import FilesCompleter # type: ignore
from nichtparasoup.imagecrawler import get_imagecrawlers
def imagecrawler_completion(*args: Any, **kwargs: Any) -> Set[str]:
    """Shell-completion helper: the set of registered imagecrawler names."""
    names = get_imagecrawlers().names()
    return set(names)
# Shell-completion helper offering YAML files for config-file arguments.
yaml_file_completion = FilesCompleter(('yaml', 'yml'))
def create_parser() -> ArgumentParser:  # pragma: no cover
    """Build the top-level argument parser with all sub-commands attached."""
    parser = ArgumentParser(
        add_help=True,
        allow_abbrev=False,
    )
    parser.add_argument(
        '--debug',
        help='Enable debug output.',
        action='store_true', dest="debug",
    )
    commands = parser.add_subparsers(
        title='Commands',
        metavar='<command>',
        dest='command',
    )
    commands.required = True
    # one helper per sub-command keeps each command's options together
    _add_run_command(commands)
    _add_config_command(commands)
    _add_info_command(commands)
    _add_completion_command(commands)
    return parser


def _add_run_command(commands: Any) -> None:  # pragma: no cover
    """Attach the 'run' sub-command (start the web server)."""
    # used `__tmp_action`, to omit type-checkers warning ala 'Action has no attribute "completer"'
    command_run = commands.add_parser(
        'run',
        help='run a server',
        description='Start a web-server to display random images.',
        add_help=True,
        allow_abbrev=False,
    )
    __tmp_action = command_run.add_argument(
        '-c', '--use-config',
        help='Use a YAML config file instead of the defaults.',
        metavar='<file>',
        action='store', dest="config_file", type=str,
    )
    __tmp_action.completer = yaml_file_completion  # type: ignore
    del __tmp_action


def _add_config_command(commands: Any) -> None:  # pragma: no cover
    """Attach the 'config' sub-command (--check / --dump, mutually exclusive)."""
    command_config = commands.add_parser(
        'config',
        description='Get config related things done.',
        help='Config related functions.',
        add_help=True,
        allow_abbrev=False,
    )
    command_config_switches = command_config.add_mutually_exclusive_group(required=True)
    __tmp_action = command_config_switches.add_argument(
        '--check',
        help='Validate and probe a YAML config file;',
        metavar='<file>',
        action='store', dest='check', type=str,
    )
    __tmp_action.completer = yaml_file_completion  # type: ignore
    del __tmp_action
    command_config_switches.add_argument(
        '--dump',
        help='Dump YAML config into a file;',
        metavar='<file>',
        action='store', dest='dump', type=str,
    )


def _add_info_command(commands: Any) -> None:  # pragma: no cover
    """Attach the 'info' sub-command (list/describe crawlers, version)."""
    command_info = commands.add_parser(
        'info',
        description='Get info for several topics.',
        help='Get info for several topics.',
        add_help=True,
        allow_abbrev=False,
    )
    command_info_switches = command_info.add_mutually_exclusive_group(required=True)
    command_info_switches.add_argument(
        '--imagecrawler-list',
        help='List available image crawler types.',
        action='store_true', dest='imagecrawler_list',
    )
    __tmp_action = command_info_switches.add_argument(
        '--imagecrawler-desc',
        help='Describe an image crawler type and its config.',
        metavar='<crawler>',
        action='store', dest='imagecrawler_desc', type=str,
    )
    __tmp_action.completer = imagecrawler_completion  # type: ignore
    del __tmp_action
    command_info_switches.add_argument(
        '--version',
        help="Show program's version number.",
        action='store_true', dest='version',
    )


def _add_completion_command(commands: Any) -> None:  # pragma: no cover
    """Attach the 'completion' sub-command (shell completion code emitter)."""
    command_completion = commands.add_parser(
        'completion',
        description='Helper command used for command completion.',
        epilog='Autocompletion is powered by https://pypi.org/project/argcomplete/',
        help='Helper command to be used for command completion.',
        add_help=True,
        allow_abbrev=False,
    )
    command_completion.add_argument(
        '-s', '--shell',
        help='Emit completion code for the specified shell.',
        action='store', dest='shell', type=str, required=True,
        choices=('bash', 'tcsh', 'fish'),
    )
| {
"repo_name": "k4cg/nichtparasoup",
"path": "nichtparasoup/cli/parser.py",
"copies": "1",
"size": "3803",
"license": "mit",
"hash": -8415263277507080000,
"line_mean": 30.6916666667,
"line_max": 113,
"alpha_frac": 0.6192479621,
"autogenerated": false,
"ratio": 4.102481121898598,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5221729083998597,
"avg_score": null,
"num_lines": null
} |
__all__ = ['create_proxy', 'POST_BODY', 'POST_RESPONSE']
from akara import request
#Create a proxy function for a service available over HTTP
def create_proxy(service_id, method, resultmap, *xarg, **xkwarg):
    """Add the function as an Zen service
    This affect how the resource is registered in Zen:
    service_id - a string which identifies this service; should be a URL
    """
    # NOTE(review): find_peer_service is not defined or imported in this view
    # of the module -- verify it is provided elsewhere in the real file.
    endpoint = find_peer_service(request.environ, service_id)
    # Map positional call args onto an HTTP request: the arg at POST_BODY's
    # position becomes the request body.
    def premap(env, arg, kwarg):
        if POST_BODY in xarg:
            index = xarg.index(POST_BODY)
            env['body'] = arg[index]
        return
    # Map the HTTP response back onto a result shaped by resultmap.
    def postmap(env, resp, content):
        result = [None]*len(resultmap)
        if POST_RESPONSE in resultmap:
            index = resultmap.index(POST_RESPONSE)
            result[index] = content
        # a non-tuple resultmap means a single (unwrapped) result value
        if not isinstance(resultmap, tuple):
            result = result[0]
        env['result'] = result
        return
    def proxy_func(*arg, **kwarg):
        #FIXME: Implement URL templates
        #FIXME: Support headers spec
        # NOTE(review): mutating locals() does not create local variables in
        # CPython, so `body`, `headers` and `result` below are undefined as
        # written; `H` (HTTP client) is also not defined in this module view.
        premap(locals(), arg, kwarg)
        resp, content = H.request(endpoint, method, body=body, headers=headers)
        postmap(locals(), resp, content)
        return result
    proxy_func.serviceid = service_id
    # NOTE(review): register_service is not defined in this module view.
    register_service(proxy_func)
    # NOTE(review): `func` is undefined -- presumably should be `proxy_func`.
    return func
#
#Singletons to indicate how HTTP constructs are interpreted
# Unique marker objects compared by identity in xarg/resultmap (see above).
POST_BODY = object()
POST_RESPONSE = object()
| {
"repo_name": "zepheira/zenpub",
"path": "lib/httpmodel.py",
"copies": "2",
"size": "1427",
"license": "apache-2.0",
"hash": 2478063050932884500,
"line_mean": 30.0217391304,
"line_max": 79,
"alpha_frac": 0.6355991591,
"autogenerated": false,
"ratio": 3.9529085872576175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5588507746357617,
"avg_score": null,
"num_lines": null
} |
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
import collections
import subprocess
from . import events
from . import futures
from . import protocols
from . import streams
from . import tasks
from .coroutines import coroutine
from .log import logger
# Re-exported subprocess constants so callers need not import subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
                               protocols.SubprocessProtocol):
    """Like StreamReaderProtocol, but for a subprocess."""
    def __init__(self, limit, loop):
        super().__init__(loop=loop)
        self._limit = limit
        # stream objects are created lazily in connection_made()
        self.stdin = self.stdout = self.stderr = None
        self._transport = None
    def __repr__(self):
        info = [self.__class__.__name__]
        if self.stdin is not None:
            info.append('stdin=%r' % self.stdin)
        if self.stdout is not None:
            info.append('stdout=%r' % self.stdout)
        if self.stderr is not None:
            info.append('stderr=%r' % self.stderr)
        return '<%s>' % ' '.join(info)
    def connection_made(self, transport):
        """Wrap the connected pipe transports in StreamReader/StreamWriter."""
        self._transport = transport
        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stdout.set_transport(stdout_transport)
        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stderr.set_transport(stderr_transport)
        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)
    def pipe_data_received(self, fd, data):
        """Feed data from the child's stdout (fd 1) or stderr (fd 2) reader."""
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            reader.feed_data(data)
    def pipe_connection_lost(self, fd, exc):
        """Close stdin or EOF/error the matching reader when a pipe closes."""
        if fd == 0:
            pipe = self.stdin
            if pipe is not None:
                pipe.close()
            self.connection_lost(exc)
            return
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        # identity test is the conventional None check (was `reader != None`)
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
    def process_exited(self):
        """Close and drop the transport once the child process has exited."""
        self._transport.close()
        self._transport = None
class Process:
    """High-level handle for a subprocess created by the asyncio helpers."""
    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        # mirror the protocol's stream objects for convenient access
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.pid)
    @property
    def returncode(self):
        # exit code of the process, or None while it is still running
        return self._transport.get_returncode()
    @coroutine
    def wait(self):
        """Wait until the process exit and return the process return code.
        This method is a coroutine."""
        return (yield from self._transport._wait())
    def send_signal(self, signal):
        # forward *signal* to the child process
        self._transport.send_signal(signal)
    def terminate(self):
        self._transport.terminate()
    def kill(self):
        self._transport.kill()
    @coroutine
    def _feed_stdin(self, input):
        """Write *input* to stdin, drain, then close it (pipe errors ignored)."""
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug('%r communicate: feed stdin (%s bytes)',
                         self, len(input))
        try:
            yield from self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)
        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()
    @coroutine
    def _noop(self):
        # placeholder coroutine used when a pipe is not connected
        return None
    @coroutine
    def _read_stream(self, fd):
        """Read a pipe (1=stdout, 2=stderr) to EOF and close its transport."""
        transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            stream = self.stderr
        else:
            assert fd == 1
            stream = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        output = yield from stream.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        transport.close()
        return output
    @coroutine
    def communicate(self, input=None):
        """Feed stdin, read stdout/stderr concurrently, wait for exit.

        NOTE: truthiness test -- an empty ``input`` (b'') is treated like None.
        """
        if input:
            stdin = self._feed_stdin(input)
        else:
            stdin = self._noop()
        if self.stdout is not None:
            stdout = self._read_stream(1)
        else:
            stdout = self._noop()
        if self.stderr is not None:
            stderr = self._read_stream(2)
        else:
            stderr = self._noop()
        stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
                                                        loop=self._loop)
        yield from self.wait()
        return (stdout, stderr)
@coroutine
def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                            loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
    """Run *cmd* through the shell and return a Process wrapping it (coroutine)."""
    if loop is None:
        loop = events.get_event_loop()

    def protocol_factory():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = yield from loop.subprocess_shell(
        protocol_factory,
        cmd, stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
@coroutine
def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                           stderr=None, loop=None,
                           limit=streams._DEFAULT_LIMIT, **kwds):
    """Execute *program* directly and return a Process wrapping it (coroutine)."""
    if loop is None:
        loop = events.get_event_loop()

    def protocol_factory():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = yield from loop.subprocess_exec(
        protocol_factory,
        program, *args,
        stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
| {
"repo_name": "classmember/proof_of_concept",
"path": "python/events/lib/python3.4/site-packages/asyncio/subprocess.py",
"copies": "16",
"size": "7223",
"license": "mit",
"hash": -603099904172129800,
"line_mean": 32.5953488372,
"line_max": 78,
"alpha_frac": 0.528173889,
"autogenerated": false,
"ratio": 4.642030848329049,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
import collections
import subprocess
from . import events
from . import futures
from . import protocols # protocol interfaces
from . import streams
from . import tasks
from .coroutines import coroutine
from .log import logger
# Re-exported subprocess constants so callers need not import subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
#
# SubprocessStream protocol:
#
class SubprocessStreamProtocol(streams.FlowControlMixin,
                               protocols.SubprocessProtocol):
    """Like StreamReaderProtocol, but for a subprocess."""
    def __init__(self, limit, loop):
        super().__init__(loop=loop)
        self._limit = limit
        # stream objects are created lazily in connection_made()
        self.stdin = self.stdout = self.stderr = None
        self._transport = None
    def __repr__(self):
        info = [self.__class__.__name__]
        if self.stdin is not None:
            info.append('stdin=%r' % self.stdin)
        if self.stdout is not None:
            info.append('stdout=%r' % self.stdout)
        if self.stderr is not None:
            info.append('stderr=%r' % self.stderr)
        return '<%s>' % ' '.join(info)
    def connection_made(self, transport):
        """Wrap the connected pipe transports in StreamReader/StreamWriter."""
        self._transport = transport
        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stdout.set_transport(stdout_transport)
        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stderr.set_transport(stderr_transport)
        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)
    def pipe_data_received(self, fd, data):
        """Feed data from the child's stdout (fd 1) or stderr (fd 2) reader."""
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            reader.feed_data(data)
    def pipe_connection_lost(self, fd, exc):
        """Close stdin or EOF/error the matching reader when a pipe closes."""
        if fd == 0:
            pipe = self.stdin
            if pipe is not None:
                pipe.close()
            self.connection_lost(exc)
            return
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        # NOTE(review): `!= None` -- `is not None` is the conventional test
        if reader != None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
    def process_exited(self):
        """Close and drop the transport once the child process has exited."""
        self._transport.close()
        self._transport = None
#
# Process:
#
class Process:
    """High-level handle for a subprocess created by the asyncio helpers."""
    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        # mirror the protocol's stream objects for convenient access
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.pid)
    @property
    def returncode(self):
        # exit code of the process, or None while it is still running
        return self._transport.get_returncode()
    @coroutine
    def wait(self):
        """Wait until the process exit and return the process return code.
        This method is a coroutine."""
        return (yield from self._transport._wait())
    def send_signal(self, signal):
        self._transport.send_signal(signal)
    def terminate(self):
        self._transport.terminate()
    def kill(self):
        self._transport.kill()
    @coroutine
    def _feed_stdin(self, input):
        """Write *input* to stdin, drain, then close it (pipe errors ignored)."""
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug('%r communicate: feed stdin (%s bytes)',
                         self, len(input))
        try:
            yield from self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)
        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()
    @coroutine
    def _noop(self):
        # placeholder coroutine used when a pipe is not connected
        return None
    @coroutine
    def _read_stream(self, fd):
        """Read a pipe (1=stdout, 2=stderr) to EOF and close its transport."""
        transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            stream = self.stderr
        else:
            assert fd == 1
            stream = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        output = yield from stream.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        transport.close()
        return output
    @coroutine
    def communicate(self, input=None):
        """Feed stdin, read stdout/stderr concurrently, wait for exit."""
        if input:
            stdin = self._feed_stdin(input)
        else:
            stdin = self._noop()
        if self.stdout is not None:
            stdout = self._read_stream(1)
        else:
            stdout = self._noop()
        if self.stderr is not None:
            stderr = self._read_stream(2)
        else:
            stderr = self._noop()
        stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
                                                        loop=self._loop)
        yield from self.wait()
        return (stdout, stderr)
#
# Create a subprocess shell:
#
@coroutine
def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                            loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
    """Run *cmd* through the shell and return a Process wrapping it (coroutine)."""
    if loop is None:
        loop = events.get_event_loop()
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    #
    # run the shell command:
    #
    transport, protocol = yield from loop.subprocess_shell(
        protocol_factory,
        cmd, stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
@coroutine
def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                           stderr=None, loop=None,
                           limit=streams._DEFAULT_LIMIT, **kwds):
    """Execute *program* directly and return a Process wrapping it (coroutine)."""
    if loop is None:
        loop = events.get_event_loop()

    def protocol_factory():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = yield from loop.subprocess_exec(
        protocol_factory,
        program, *args,
        stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
| {
"repo_name": "hhstore/flask-annotated",
"path": "asyncio/asyncio-3.4.3/asyncio/subprocess.py",
"copies": "2",
"size": "7370",
"license": "mit",
"hash": 3259661685650119000,
"line_mean": 31.1754385965,
"line_max": 78,
"alpha_frac": 0.5258996728,
"autogenerated": false,
"ratio": 4.508912108174554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007927243815878446,
"num_lines": 228
} |
__all__ = 'create_subprocess_exec', 'create_subprocess_shell'
import subprocess
import warnings
from . import events
from . import protocols
from . import streams
from . import tasks
from .log import logger
# Re-exported subprocess constants so callers need not import subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
                               protocols.SubprocessProtocol):
    """Like StreamReaderProtocol, but for a subprocess."""
    def __init__(self, limit, loop):
        super().__init__(loop=loop)
        self._limit = limit
        # stream objects are created lazily in connection_made()
        self.stdin = self.stdout = self.stderr = None
        self._transport = None
        # lifecycle state: the transport is only closed once the process has
        # exited AND every connected pipe has been torn down
        self._process_exited = False
        self._pipe_fds = []
        self._stdin_closed = self._loop.create_future()
    def __repr__(self):
        info = [self.__class__.__name__]
        if self.stdin is not None:
            info.append(f'stdin={self.stdin!r}')
        if self.stdout is not None:
            info.append(f'stdout={self.stdout!r}')
        if self.stderr is not None:
            info.append(f'stderr={self.stderr!r}')
        return '<{}>'.format(' '.join(info))
    def connection_made(self, transport):
        """Wrap the connected pipe transports and record the open pipe fds."""
        self._transport = transport
        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stdout.set_transport(stdout_transport)
            self._pipe_fds.append(1)
        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stderr.set_transport(stderr_transport)
            self._pipe_fds.append(2)
        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)
    def pipe_data_received(self, fd, data):
        """Feed data from the child's stdout (fd 1) or stderr (fd 2) reader."""
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            reader.feed_data(data)
    def pipe_connection_lost(self, fd, exc):
        """Handle a pipe closing: resolve the stdin future or EOF the reader."""
        if fd == 0:
            pipe = self.stdin
            if pipe is not None:
                pipe.close()
            self.connection_lost(exc)
            if exc is None:
                self._stdin_closed.set_result(None)
            else:
                self._stdin_closed.set_exception(exc)
            return
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
        if fd in self._pipe_fds:
            self._pipe_fds.remove(fd)
        self._maybe_close_transport()
    def process_exited(self):
        self._process_exited = True
        self._maybe_close_transport()
    def _maybe_close_transport(self):
        # close only after the process exited and all pipes disconnected
        if len(self._pipe_fds) == 0 and self._process_exited:
            self._transport.close()
            self._transport = None
    def _get_close_waiter(self, stream):
        # NOTE(review): implicitly returns None for stdout/stderr; only the
        # stdin writer has a close waiter here -- confirm callers expect that.
        if stream is self.stdin:
            return self._stdin_closed
class Process:
    """High-level handle for a subprocess created by the asyncio helpers."""

    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        # mirror the protocol's stream objects for convenient access
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()

    def __repr__(self):
        return f'<{self.__class__.__name__} {self.pid}>'

    @property
    def returncode(self):
        """Exit code of the process, or None while it is still running."""
        return self._transport.get_returncode()

    async def wait(self):
        """Wait until the process exit and return the process return code."""
        return await self._transport._wait()

    def send_signal(self, signal):
        """Send *signal* to the child process."""
        self._transport.send_signal(signal)

    def terminate(self):
        """Ask the child process to terminate."""
        self._transport.terminate()

    def kill(self):
        """Forcibly kill the child process."""
        self._transport.kill()

    async def _feed_stdin(self, input):
        """Write *input* to stdin, drain, then close it (pipe errors ignored)."""
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug(
                '%r communicate: feed stdin (%s bytes)', self, len(input))
        try:
            await self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)
        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()

    async def _noop(self):
        """Placeholder awaitable used when a pipe is not connected."""
        return None

    async def _read_stream(self, fd):
        """Read a pipe (1=stdout, 2=stderr) to EOF and close its transport."""
        pipe_transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            source = self.stderr
        else:
            assert fd == 1
            source = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        data = await source.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        pipe_transport.close()
        return data

    async def communicate(self, input=None):
        """Feed *input* (if any), read stdout/stderr to EOF, wait for exit."""
        feeder = self._feed_stdin(input) if input is not None else self._noop()
        out_reader = self._read_stream(1) if self.stdout is not None else self._noop()
        err_reader = self._read_stream(2) if self.stderr is not None else self._noop()
        _, stdout, stderr = await tasks.gather(feeder, out_reader, err_reader,
                                               loop=self._loop)
        await self.wait()
        return (stdout, stderr)
async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                                  loop=None, limit=streams._DEFAULT_LIMIT,
                                  **kwds):
    """Run *cmd* through the shell and return a connected Process.

    stdin/stdout/stderr and any extra keyword arguments are forwarded to
    the event loop's subprocess_shell(); *limit* sizes the StreamReaders
    attached to the child's output pipes.
    """
    if loop is None:
        loop = events.get_event_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8 "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning,
                      stacklevel=2
        )

    def _make_protocol():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = await loop.subprocess_shell(
        _make_protocol, cmd,
        stdin=stdin, stdout=stdout, stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                                 stderr=None, loop=None,
                                 limit=streams._DEFAULT_LIMIT, **kwds):
    """Execute *program* with *args* (no shell) and return a Process.

    stdin/stdout/stderr and any extra keyword arguments are forwarded to
    the event loop's subprocess_exec(); *limit* sizes the StreamReaders
    attached to the child's output pipes.
    """
    if loop is None:
        loop = events.get_event_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8 "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning,
                      stacklevel=2
        )

    def _make_protocol():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = await loop.subprocess_exec(
        _make_protocol, program, *args,
        stdin=stdin, stdout=stdout, stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
| {
"repo_name": "batermj/algorithm-challenger",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/asyncio/subprocess.py",
"copies": "22",
"size": "8068",
"license": "apache-2.0",
"hash": 8538744635092626000,
"line_mean": 32.4771784232,
"line_max": 77,
"alpha_frac": 0.5397868121,
"autogenerated": false,
"ratio": 4.489705063995548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
import subprocess
from . import events
from . import protocols
from . import streams
from . import tasks
from .coroutines import coroutine
from .log import logger
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
                               protocols.SubprocessProtocol):
    """Like StreamReaderProtocol, but for a subprocess.

    Bridges the low-level subprocess transport to StreamReader/StreamWriter
    objects exposed as .stdin/.stdout/.stderr.
    """
    def __init__(self, limit, loop):
        super().__init__(loop=loop)
        self._limit = limit  # buffer limit passed to each StreamReader
        self.stdin = self.stdout = self.stderr = None
        self._transport = None
    def __repr__(self):
        info = [self.__class__.__name__]
        if self.stdin is not None:
            info.append('stdin=%r' % self.stdin)
        if self.stdout is not None:
            info.append('stdout=%r' % self.stdout)
        if self.stderr is not None:
            info.append('stderr=%r' % self.stderr)
        return '<%s>' % ' '.join(info)
    def connection_made(self, transport):
        """Wrap whichever pipes were requested in stream objects."""
        self._transport = transport
        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stdout.set_transport(stdout_transport)
        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stderr.set_transport(stderr_transport)
        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)
    def pipe_data_received(self, fd, data):
        """Feed bytes read from pipe *fd* into the matching StreamReader."""
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            reader.feed_data(data)
    def pipe_connection_lost(self, fd, exc):
        """Propagate EOF (or *exc*) for pipe *fd* to its stream object."""
        if fd == 0:
            pipe = self.stdin
            if pipe is not None:
                pipe.close()
            self.connection_lost(exc)
            return
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        # Fixed: was `reader != None`; identity comparison is the correct
        # (and PEP 8 mandated) way to test for None.
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
    def process_exited(self):
        # NOTE(review): this snapshot closes the transport as soon as the
        # process exits, without waiting for stdout/stderr pipes to drain;
        # later CPython versions defer via a _maybe_close_transport helper.
        self._transport.close()
        self._transport = None
class Process:
    """High-level subprocess handle (Python 3.6 era, generator coroutines).

    Wraps the subprocess transport/protocol pair and exposes the pipe
    streams, the pid, and communicate()-style helpers.
    """
    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        # Streams are created by the protocol; re-exported for convenience.
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.pid)
    @property
    def returncode(self):
        # None while the child is still running.
        return self._transport.get_returncode()
    @coroutine
    def wait(self):
        """Wait until the process exit and return the process return code.
        This method is a coroutine."""
        return (yield from self._transport._wait())
    def send_signal(self, signal):
        # Deliver *signal* to the child process.
        self._transport.send_signal(signal)
    def terminate(self):
        # Ask the child to terminate (SIGTERM on POSIX).
        self._transport.terminate()
    def kill(self):
        # Forcibly kill the child (SIGKILL on POSIX).
        self._transport.kill()
    @coroutine
    def _feed_stdin(self, input):
        """Write *input* to stdin, drain, then close the pipe.

        Broken-pipe errors are swallowed on purpose: communicate() must not
        fail merely because the child stopped reading its stdin.
        """
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug('%r communicate: feed stdin (%s bytes)',
                        self, len(input))
        try:
            yield from self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)
        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()
    @coroutine
    def _noop(self):
        # Placeholder coroutine used when a pipe was not requested.
        return None
    @coroutine
    def _read_stream(self, fd):
        """Read pipe *fd* (1=stdout, 2=stderr) to EOF and close it."""
        transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            stream = self.stderr
        else:
            assert fd == 1
            stream = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        output = yield from stream.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        transport.close()
        return output
    @coroutine
    def communicate(self, input=None):
        """Feed stdin, read stdout/stderr to EOF, wait for exit.

        Returns (stdout, stderr); entries are None for pipes that were not
        requested when the process was created.
        """
        if input is not None:
            stdin = self._feed_stdin(input)
        else:
            stdin = self._noop()
        if self.stdout is not None:
            stdout = self._read_stream(1)
        else:
            stdout = self._noop()
        if self.stderr is not None:
            stderr = self._read_stream(2)
        else:
            stderr = self._noop()
        # Run all three pipe operations concurrently, then reap the child.
        stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
                                                        loop=self._loop)
        yield from self.wait()
        return (stdout, stderr)
@coroutine
def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                            loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
    """Run *cmd* through the shell and return a connected Process.

    Extra keyword arguments are forwarded to the event loop's
    subprocess_shell(); *limit* sizes the attached StreamReaders.
    """
    if loop is None:
        loop = events.get_event_loop()

    def _make_protocol():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = yield from loop.subprocess_shell(
        _make_protocol, cmd,
        stdin=stdin, stdout=stdout, stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
@coroutine
def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                           stderr=None, loop=None,
                           limit=streams._DEFAULT_LIMIT, **kwds):
    """Execute *program* with *args* (no shell) and return a Process.

    Extra keyword arguments are forwarded to the event loop's
    subprocess_exec(); *limit* sizes the attached StreamReaders.
    """
    if loop is None:
        loop = events.get_event_loop()

    def _make_protocol():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = yield from loop.subprocess_exec(
        _make_protocol, program, *args,
        stdin=stdin, stdout=stdout, stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
| {
"repo_name": "anbangleo/NlsdeWeb",
"path": "Python-3.6.0/Lib/asyncio/subprocess.py",
"copies": "8",
"size": "7194",
"license": "mit",
"hash": -1281543367316727000,
"line_mean": 32.7746478873,
"line_max": 78,
"alpha_frac": 0.5268279121,
"autogenerated": false,
"ratio": 4.638297872340425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008485500422630448,
"num_lines": 213
} |
__all__ = 'create_subprocess_exec', 'create_subprocess_shell'
import subprocess
from . import events
from . import protocols
from . import streams
from . import tasks
from .log import logger
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
                               protocols.SubprocessProtocol):
    """Like StreamReaderProtocol, but for a subprocess."""
    def __init__(self, limit, loop):
        super().__init__(loop=loop)
        self._limit = limit  # buffer limit passed to each StreamReader
        self.stdin = self.stdout = self.stderr = None
        self._transport = None
        # Transport teardown bookkeeping: close only after every pipe has
        # disconnected AND the process has exited.
        self._process_exited = False
        self._pipe_fds = []
    def __repr__(self):
        info = [self.__class__.__name__]
        if self.stdin is not None:
            info.append(f'stdin={self.stdin!r}')
        if self.stdout is not None:
            info.append(f'stdout={self.stdout!r}')
        if self.stderr is not None:
            info.append(f'stderr={self.stderr!r}')
        return '<{}>'.format(' '.join(info))
    def connection_made(self, transport):
        """Wrap whichever pipes were requested in stream objects."""
        self._transport = transport
        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stdout.set_transport(stdout_transport)
            self._pipe_fds.append(1)
        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stderr.set_transport(stderr_transport)
            self._pipe_fds.append(2)
        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)
    def pipe_data_received(self, fd, data):
        """Feed bytes read from pipe *fd* into the matching StreamReader."""
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            reader.feed_data(data)
    def pipe_connection_lost(self, fd, exc):
        """Propagate EOF (or *exc*) for pipe *fd*, tracking open pipes."""
        if fd == 0:
            pipe = self.stdin
            if pipe is not None:
                pipe.close()
            self.connection_lost(exc)
            return
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
        if fd in self._pipe_fds:
            self._pipe_fds.remove(fd)
        self._maybe_close_transport()
    def process_exited(self):
        # Record the exit; the transport itself is closed lazily below.
        self._process_exited = True
        self._maybe_close_transport()
    def _maybe_close_transport(self):
        # Close only once all pipes are gone AND the child has exited,
        # so buffered pipe data is never dropped.
        if len(self._pipe_fds) == 0 and self._process_exited:
            self._transport.close()
            self._transport = None
class Process:
    """High-level subprocess handle (async/await era).

    Wraps the subprocess transport/protocol pair and exposes the pipe
    streams, the pid, and communicate()-style helpers.
    """
    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        # Streams are created by the protocol; re-exported for convenience.
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()
    def __repr__(self):
        return f'<{self.__class__.__name__} {self.pid}>'
    @property
    def returncode(self):
        # None while the child is still running.
        return self._transport.get_returncode()
    async def wait(self):
        """Wait until the process exit and return the process return code."""
        return await self._transport._wait()
    def send_signal(self, signal):
        # Deliver *signal* to the child process.
        self._transport.send_signal(signal)
    def terminate(self):
        # Ask the child to terminate (SIGTERM on POSIX).
        self._transport.terminate()
    def kill(self):
        # Forcibly kill the child (SIGKILL on POSIX).
        self._transport.kill()
    async def _feed_stdin(self, input):
        """Write *input* to stdin, drain, then close the pipe.

        Broken-pipe errors are swallowed on purpose: communicate() must not
        fail merely because the child stopped reading its stdin.
        """
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug(
                '%r communicate: feed stdin (%s bytes)', self, len(input))
        try:
            await self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)
        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()
    async def _noop(self):
        # Placeholder coroutine used when a pipe was not requested.
        return None
    async def _read_stream(self, fd):
        """Read pipe *fd* (1=stdout, 2=stderr) to EOF and close it."""
        transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            stream = self.stderr
        else:
            assert fd == 1
            stream = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        output = await stream.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        transport.close()
        return output
    async def communicate(self, input=None):
        """Feed stdin, read stdout/stderr to EOF, wait for exit.

        Returns (stdout, stderr); entries are None for pipes that were not
        requested when the process was created.
        """
        if input is not None:
            stdin = self._feed_stdin(input)
        else:
            stdin = self._noop()
        if self.stdout is not None:
            stdout = self._read_stream(1)
        else:
            stdout = self._noop()
        if self.stderr is not None:
            stderr = self._read_stream(2)
        else:
            stderr = self._noop()
        # Run all three pipe operations concurrently, then reap the child.
        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr,
                                                   loop=self._loop)
        await self.wait()
        return (stdout, stderr)
async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                                  loop=None, limit=streams._DEFAULT_LIMIT,
                                  **kwds):
    """Run *cmd* through the shell and return a connected Process.

    Extra keyword arguments are forwarded to the event loop's
    subprocess_shell(); *limit* sizes the attached StreamReaders.
    """
    if loop is None:
        loop = events.get_event_loop()

    def _make_protocol():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = await loop.subprocess_shell(
        _make_protocol, cmd,
        stdin=stdin, stdout=stdout, stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                                 stderr=None, loop=None,
                                 limit=streams._DEFAULT_LIMIT, **kwds):
    """Execute *program* with *args* (no shell) and return a Process.

    Extra keyword arguments are forwarded to the event loop's
    subprocess_exec(); *limit* sizes the attached StreamReaders.
    """
    if loop is None:
        loop = events.get_event_loop()

    def _make_protocol():
        return SubprocessStreamProtocol(limit=limit, loop=loop)

    transport, protocol = await loop.subprocess_exec(
        _make_protocol, program, *args,
        stdin=stdin, stdout=stdout, stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
| {
"repo_name": "huguesv/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/asyncio/subprocess.py",
"copies": "11",
"size": "7254",
"license": "apache-2.0",
"hash": 8507266298106950000,
"line_mean": 32.2752293578,
"line_max": 77,
"alpha_frac": 0.5417700579,
"autogenerated": false,
"ratio": 4.436697247706422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['create_test_spatial_db', 'get_geo_where_clause', 'SpatialBackend']
from ctypes.util import find_library
from django.conf import settings
from django.db.backends.signals import connection_created
from django.contrib.gis.db.backend.base import BaseSpatialBackend
from django.contrib.gis.db.backend.spatialite.adaptor import SpatiaLiteAdaptor
from django.contrib.gis.db.backend.spatialite.creation import create_test_spatial_db
from django.contrib.gis.db.backend.spatialite.field import SpatiaLiteField
from django.contrib.gis.db.backend.spatialite.models import GeometryColumns, SpatialRefSys
from django.contrib.gis.db.backend.spatialite.query import *
# Here we are figuring out the path to the SpatiLite library (`libspatialite`).
# If it's not in the system PATH, it may be set manually in the settings via
# the `SPATIALITE_LIBRARY_PATH` setting.
# Resolve the library path at import time: an explicit setting wins,
# otherwise fall back to a ctypes search of the system linker path.
spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH', find_library('spatialite'))
if spatialite_lib:
    def initialize_spatialite(sender=None, **kwargs):
        """
        This function initializes the pysqlite2 connection to enable the
        loading of extensions, and to load up the SpatiaLite library
        extension.
        """
        # Local import: `connection` must be resolved lazily, after Django's
        # database machinery has been configured.
        from django.db import connection
        connection.connection.enable_load_extension(True)
        connection.cursor().execute("SELECT load_extension(%s)", (spatialite_lib,))
    # Re-run the initializer on every new database connection, since
    # extensions are loaded per-connection.
    connection_created.connect(initialize_spatialite)
else:
    # No SpatiaLite library found.
    raise Exception('Unable to locate SpatiaLite, needed to use GeoDjango with sqlite3.')
# Singleton describing the SpatiaLite backend: maps GeoDjango's abstract
# spatial operations onto the SpatiaLite SQL function names imported from
# the `query` module above.
SpatialBackend = BaseSpatialBackend(name='spatialite', spatialite=True,
                    area=AREA,
                    centroid=CENTROID,
                    contained=CONTAINED,
                    difference=DIFFERENCE,
                    distance=DISTANCE,
                    distance_functions=DISTANCE_FUNCTIONS,
                    envelope=ENVELOPE,
                    from_text=GEOM_FROM_TEXT,
                    gis_terms=SPATIALITE_TERMS,
                    intersection=INTERSECTION,
                    length=LENGTH,
                    num_geom=NUM_GEOM,
                    num_points=NUM_POINTS,
                    point_on_surface=POINT_ON_SURFACE,
                    scale=SCALE,
                    select=GEOM_SELECT,
                    svg=ASSVG,
                    sym_difference=SYM_DIFFERENCE,
                    transform=TRANSFORM,
                    translate=TRANSLATE,
                    union=UNION,
                    unionagg=UNIONAGG,
                    Adaptor=SpatiaLiteAdaptor,
                    Field=SpatiaLiteField,
                    GeometryColumns=GeometryColumns,
                    SpatialRefSys=SpatialRefSys,
                    )
| {
"repo_name": "CollabQ/CollabQ",
"path": "vendor/django/contrib/gis/db/backend/spatialite/__init__.py",
"copies": "10",
"size": "3213",
"license": "apache-2.0",
"hash": -3394351264907441700,
"line_mean": 52.55,
"line_max": 90,
"alpha_frac": 0.5474634298,
"autogenerated": false,
"ratio": 5.258592471358429,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009480116833058011,
"num_lines": 60
} |
__all__ = ['create_test_spatial_db', 'get_geo_where_clause', 'SpatialBackend']
from ctypes.util import find_library
from django.conf import settings
from django.db.backends.signals import connection_created
from django.contrib.gis.db.backend.base import BaseSpatialBackend
from django.contrib.gis.db.backend.spatialite.adaptor import SpatiaLiteAdaptor
from django.contrib.gis.db.backend.spatialite.creation import create_test_spatial_db
from django.contrib.gis.db.backend.spatialite.field import SpatiaLiteField
from django.contrib.gis.db.backend.spatialite.models import GeometryColumns, SpatialRefSys
from django.contrib.gis.db.backend.spatialite.query import *
# Here we are figuring out the path to the SpatiLite library (`libspatialite`).
# If it's not in the system PATH, it may be set manually in the settings via
# the `SPATIALITE_LIBRARY_PATH` setting.
# Resolve the library path at import time: an explicit setting wins,
# otherwise fall back to a ctypes search of the system linker path.
spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH', find_library('spatialite'))
if spatialite_lib:
    def initialize_spatialite(sender=None, **kwargs):
        """
        This function initializes the pysqlite2 connection to enable the
        loading of extensions, and to load up the SpatiaLite library
        extension.
        """
        # Local import: `connection` must be resolved lazily, after Django's
        # database machinery has been configured.
        from django.db import connection
        connection.connection.enable_load_extension(True)
        connection.cursor().execute("SELECT load_extension(%s)", (spatialite_lib,))
    # Re-run the initializer on every new database connection, since
    # extensions are loaded per-connection.
    connection_created.connect(initialize_spatialite)
else:
    # No SpatiaLite library found.
    raise Exception('Unable to locate SpatiaLite, needed to use GeoDjango with sqlite3.')
# Singleton describing the SpatiaLite backend: maps GeoDjango's abstract
# spatial operations onto the SpatiaLite SQL function names imported from
# the `query` module above.
SpatialBackend = BaseSpatialBackend(name='spatialite', spatialite=True,
                    area=AREA,
                    centroid=CENTROID,
                    contained=CONTAINED,
                    difference=DIFFERENCE,
                    distance=DISTANCE,
                    distance_functions=DISTANCE_FUNCTIONS,
                    envelope=ENVELOPE,
                    from_text=GEOM_FROM_TEXT,
                    gis_terms=SPATIALITE_TERMS,
                    intersection=INTERSECTION,
                    length=LENGTH,
                    num_geom=NUM_GEOM,
                    num_points=NUM_POINTS,
                    point_on_surface=POINT_ON_SURFACE,
                    scale=SCALE,
                    select=GEOM_SELECT,
                    svg=ASSVG,
                    sym_difference=SYM_DIFFERENCE,
                    transform=TRANSFORM,
                    translate=TRANSLATE,
                    union=UNION,
                    unionagg=UNIONAGG,
                    Adaptor=SpatiaLiteAdaptor,
                    Field=SpatiaLiteField,
                    GeometryColumns=GeometryColumns,
                    SpatialRefSys=SpatialRefSys,
                    )
| {
"repo_name": "greggian/TapdIn",
"path": "django/contrib/gis/db/backend/spatialite/__init__.py",
"copies": "1",
"size": "3273",
"license": "apache-2.0",
"hash": -8964399336573802000,
"line_mean": 52.55,
"line_max": 90,
"alpha_frac": 0.5374274366,
"autogenerated": false,
"ratio": 5.035384615384616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009373366675751736,
"num_lines": 60
} |
# All credit to github.com/rs
# almost a direct copy of https://github.com/rs/xid
# Changes to make more pythonic as needed.
import hashlib
import os
import platform
import time
import datetime
import threading
import base32hex
# MyPy imports
# `typing` is only needed for the MyPy type comments below; guard the
# import narrowly so a genuinely missing module is the only thing ignored
# (the original bare `except:` also swallowed KeyboardInterrupt etc.).
try:
    from typing import List
except ImportError:
    pass  # ignore, we do not need the typing module
# Some Constants
trimLen = 20     # length of the trimmed base32 string form of an Xid
encodedLen = 24  # full base32-encoded length before trimming
decodedLen = 14  # base32-decoded buffer length
rawLen = 12      # number of raw bytes in an Xid
class InvalidXid(Exception):
    """Raised when a string cannot be decoded into a valid Xid."""
    pass
def randInt():
    # type: () -> int
    """Return a random 24-bit integer built from 3 bytes of OS entropy.

    Uses bytearray indexing, which yields ints on both Python 2 and 3.
    The original `str(os.urandom(3))` + `ord` ord-ed characters of the
    bytes object's *repr* on Python 3, not the entropy bytes themselves.
    """
    buf = bytearray(os.urandom(3))
    return buf[0] << 16 | buf[1] << 8 | buf[2]
def realMachineID():
    # type: () -> List[int]
    """Return a 3-byte machine fingerprint as a list of ints (0-255).

    Derived from the md5 of the hostname so it is stable per host; falls
    back to random bytes if anything goes wrong. Bytearray indexing yields
    ints on both Python 2 and 3 (the original `str(digest)` + `ord` ord-ed
    characters of the repr on Python 3).
    """
    try:
        hostname = platform.node()
        hw = hashlib.md5()
        hw.update(hostname.encode('utf-8'))
        return list(bytearray(hw.digest()[:3]))
    except Exception:
        # Best-effort fallback: random, non-stable machine id.
        return list(bytearray(os.urandom(3)))
## Module level items
pid = os.getpid()  # captured once at import time; stale after fork()
machineID = realMachineID()  # 3-byte per-host fingerprint
lock = threading.Lock()  # guards the shared counter generator below
def generateNextId():
    """Infinite generator yielding consecutive counter values.

    Seeded from a random 24-bit value so concurrently started processes
    begin at different points; each next() returns the previous value + 1.
    """
    counter = randInt()
    while True:
        counter += 1
        yield counter
# Shared counter generator; every access must hold `lock` (see generate_new_xid).
objectIDGenerator = generateNextId()
def generate_new_xid():
    # type: () -> List[int]
    """Build the 12 raw bytes of a new Xid as a list of ints.

    Layout: 4-byte big-endian Unix timestamp, 3-byte machine id,
    2-byte pid, 3-byte monotonically increasing counter.
    """
    now = int(time.time())
    id = [0] * rawLen
    # 4-byte timestamp, big-endian.
    id[0] = (now >> 24) & 0xff
    id[1] = (now >> 16) & 0xff
    id[2] = (now >> 8) & 0xff
    id[3] = (now) & 0xff
    # 3-byte machine identifier.
    id[4] = machineID[0]
    id[5] = machineID[1]
    id[6] = machineID[2]
    # 2-byte process id.
    id[7] = (pid >> 8) & 0xff
    id[8] = (pid) & 0xff
    # 3-byte counter. `with` releases the lock even if next() raises;
    # the original acquire()/release() pair would have leaked it.
    with lock:
        i = next(objectIDGenerator)
    id[9] = (i >> 16) & 0xff
    id[10] = (i >> 8) & 0xff
    id[11] = (i) & 0xff
    return id
class Xid(object):
    """Globally-unique, lexically sortable 12-byte identifier (port of rs/xid).

    NOTE(review): written with Python 2 string semantics in mind --
    ``bytes()`` builds a ``str`` via ``chr`` and encoding relies on the
    third-party ``base32hex`` module; verify behaviour on Python 3.
    """
    def __init__(self, id=None):
        # type: (List[int]) -> None
        # `id` is the raw 12-int byte list; a fresh one is generated when omitted.
        if id is None:
            id = generate_new_xid()
        self.value = id
    def pid(self):
        # type: () -> int
        # Bytes 7-8: pid of the creating process.
        return (self.value[7] << 8 | self.value[8])
    def counter(self):
        # type: () -> int
        # Bytes 9-11: per-process counter.
        return (self.value[9] << 16 |
                self.value[10] << 8 |
                self.value[11])
    def machine(self):
        # type: () -> str
        # Bytes 4-6: machine fingerprint, returned as a 3-char string.
        return ''.join(map(chr, self.value[4:7]))
    def datetime(self):
        # Creation time as a naive local datetime.
        return datetime.datetime.fromtimestamp(self.time())
    def time(self):
        # type: () -> int
        # Bytes 0-3: big-endian Unix timestamp.
        return (self.value[0] << 24 |
                self.value[1] << 16 |
                self.value[2] << 8 |
                self.value[3])
    def string(self):
        # type: () -> str
        # Canonical 20-char lowercase base32hex representation.
        byte_value = self.bytes()
        return base32hex.b32encode(byte_value).lower()[:trimLen]
    def bytes(self):
        # type: () -> str
        # Raw bytes as a str (Python 2 semantics; see class note).
        return ''.join(map(chr, self.value))
    def __repr__(self):
        return "<Xid '%s'>" % self.__str__()
    def __str__(self):
        return self.string()
    def __lt__(self, arg):
        # type: (Xid) -> bool
        # String order equals creation order thanks to the big-endian layout.
        return self.string() < arg.string()
    def __gt__(self, arg):
        # type: (Xid) -> bool
        return self.string() > arg.string()
    @classmethod
    def from_string(cls, s):
        # type: (str) -> Xid
        """Decode a base32hex string back into an Xid; raises InvalidXid."""
        val = base32hex.b32decode(s.upper())
        value_check = [0 <= x < 255 for x in val]
        if not all(value_check):
            raise InvalidXid(s)
        return cls(val)
| {
"repo_name": "graham/python_xid",
"path": "xid.py",
"copies": "1",
"size": "3355",
"license": "mit",
"hash": -975038500926340900,
"line_mean": 20.7857142857,
"line_max": 64,
"alpha_frac": 0.5210134128,
"autogenerated": false,
"ratio": 3.2763671875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.926861553931319,
"avg_score": 0.005753012197362115,
"num_lines": 154
} |
__all__ = ['cross',
'dot',
'express',
'outer',
'inertia',
'mechanics_printing',
'mprint',
'mpprint',
'mlatex',
'kinematic_equations',
'inertia_of_point_mass',
'partial_velocity',
'linear_momentum',
'angular_momentum',
'kinetic_energy',
'potential_energy',
'Lagrangian']
from sympy.physics.mechanics.essential import (Vector, Dyadic, ReferenceFrame,
MechanicsStrPrinter,
MechanicsPrettyPrinter,
MechanicsLatexPrinter,
dynamicsymbols)
from sympy.physics.mechanics.particle import Particle
from sympy.physics.mechanics.rigidbody import RigidBody
from sympy.physics.mechanics.point import Point
from sympy import sympify, diff, sin, cos, Matrix
from sympy.core.basic import S
def cross(vec1, vec2):
    """Cross product convenience wrapper for Vector.cross(): \n"""
    # NOTE(review): only vec1 is type-checked; a bad vec2 surfaces from
    # the ^ operator itself.
    if not isinstance(vec1, (Vector, Dyadic)):
        raise TypeError('Cross product is between two vectors')
    # Vector/Dyadic overload ^ as the cross-product operator.
    return vec1 ^ vec2
# Append the wrapped method's documentation to the wrapper's docstring.
cross.__doc__ += Vector.cross.__doc__
def dot(vec1, vec2):
    """Dot product convenience wrapper for Vector.dot(): \n"""
    # NOTE(review): only vec1 is type-checked; a bad vec2 surfaces from
    # the & operator itself.
    if not isinstance(vec1, (Vector, Dyadic)):
        raise TypeError('Dot product is between two vectors')
    # Vector/Dyadic overload & as the dot-product operator.
    return vec1 & vec2
# Append the wrapped method's documentation to the wrapper's docstring.
dot.__doc__ += Vector.dot.__doc__
def express(vec, frame, frame2=None):
    """Express convenience wrapper for Vector.express(): \n"""
    if not isinstance(vec, (Vector, Dyadic)):
        raise TypeError('Can only express Vectors')
    if isinstance(vec, Vector):
        return vec.express(frame)
    else:
        # Dyadic.express takes a second frame (for its right-hand basis).
        return vec.express(frame, frame2)
# Append the wrapped method's documentation to the wrapper's docstring.
express.__doc__ += Vector.express.__doc__
def outer(vec1, vec2):
    """Outer product convenience wrapper for Vector.outer():\n"""
    # NOTE(review): only vec1 is type-checked; a bad vec2 surfaces from
    # the | operator itself.
    if not isinstance(vec1, Vector):
        raise TypeError('Outer product is between two Vectors')
    # Vector overloads | as the outer-product operator.
    return vec1 | vec2
# Append the wrapped method's documentation, mirroring cross/dot above.
# Fixed: this previously appended Vector.express.__doc__ (copy-paste bug);
# the docstring typo "prodcut" is also corrected.
outer.__doc__ += Vector.outer.__doc__
def inertia(frame, ixx, iyy, izz, ixy=0, iyz=0, izx=0):
    """Simple way to create inertia Dyadic object.
    If you don't know what a Dyadic is, just treat this like the inertia
    tensor. Then, do the easy thing and define it in a body-fixed frame.
    Parameters
    ==========
    frame : ReferenceFrame
        The frame the inertia is defined in
    ixx : Sympifyable
        the xx element in the inertia dyadic
    iyy : Sympifyable
        the yy element in the inertia dyadic
    izz : Sympifyable
        the zz element in the inertia dyadic
    ixy : Sympifyable
        the xy element in the inertia dyadic
    iyz : Sympifyable
        the yz element in the inertia dyadic
    izx : Sympifyable
        the zx element in the inertia dyadic
    Examples
    ========
    >>> from sympy.physics.mechanics import ReferenceFrame, inertia
    >>> N = ReferenceFrame('N')
    >>> inertia(N, 1, 2, 3)
    (N.x|N.x) + 2*(N.y|N.y) + 3*(N.z|N.z)
    """
    if not isinstance(frame, ReferenceFrame):
        raise TypeError('Need to define the inertia in a frame')
    # Build the symmetric dyadic term by term; | is the outer product.
    # The off-diagonal products (ixy, iyz, izx) each appear twice, which
    # encodes the symmetry of the inertia tensor.
    ol = sympify(ixx) * (frame.x | frame.x)
    ol += sympify(ixy) * (frame.x | frame.y)
    ol += sympify(izx) * (frame.x | frame.z)
    ol += sympify(ixy) * (frame.y | frame.x)
    ol += sympify(iyy) * (frame.y | frame.y)
    ol += sympify(iyz) * (frame.y | frame.z)
    ol += sympify(izx) * (frame.z | frame.x)
    ol += sympify(iyz) * (frame.z | frame.y)
    ol += sympify(izz) * (frame.z | frame.z)
    return ol
def inertia_of_point_mass(mass, pos_vec, frame):
    """Inertia dyadic of a point mass realtive to point O.
    Parameters
    ==========
    mass : Sympifyable
        Mass of the point mass
    pos_vec : Vector
        Position from point O to point mass
    frame : ReferenceFrame
        Reference frame to express the dyadic in
    Examples
    ========
    >>> from sympy import symbols
    >>> from sympy.physics.mechanics import ReferenceFrame, inertia_of_point_mass
    >>> N = ReferenceFrame('N')
    >>> r, m = symbols('r m')
    >>> px = r * N.x
    >>> inertia_of_point_mass(m, px, N)
    m*r**2*(N.y|N.y) + m*r**2*(N.z|N.z)
    """
    # I = m * (|r|^2 * unit_dyadic - r outer r); & is dot, | is outer.
    return mass * (((frame.x | frame.x) + (frame.y | frame.y) +
                   (frame.z | frame.z)) * (pos_vec & pos_vec) -
                   (pos_vec | pos_vec))
def mechanics_printing():
    """Sets up interactive printing for mechanics' derivatives.
    The main benefit of this is for printing of time derivatives;
    instead of displaying as Derivative(f(t),t), it will display f'
    This is only actually needed for when derivatives are present and are not
    in a physics.mechanics object.
    Examples
    ========
    >>> # 2 lines below are for tests to function properly
    >>> import sys
    >>> sys.displayhook = sys.__displayhook__
    >>> from sympy import Function, Symbol, diff
    >>> from sympy.physics.mechanics import mechanics_printing
    >>> f = Function('f')
    >>> t = Symbol('t')
    >>> x = Symbol('x')
    >>> diff(f(t), t)
    Derivative(f(t), t)
    >>> mechanics_printing()
    >>> diff(f(t), t)
    f'
    >>> diff(f(x), x)
    Derivative(f(x), x)
    >>> # 2 lines below are for tests to function properly
    >>> import sys
    >>> sys.displayhook = sys.__displayhook__
    """
    import sys
    # Replace the interactive result hook so every evaluated expression is
    # rendered through the mechanics string printer (global side effect).
    sys.displayhook = mprint
def mprint(expr, **settings):
    r"""Function for printing of expressions generated in mechanics.
    Extends SymPy's StrPrinter; mprint is equivalent to:
    print sstr()
    mprint takes the same options as sstr.
    Parameters
    ==========
    expr : valid sympy object
        SymPy expression to print
    settings : args
        Same as print for SymPy
    Examples
    ========
    >>> from sympy.physics.mechanics import mprint, dynamicsymbols
    >>> u1 = dynamicsymbols('u1')
    >>> print(u1)
    u1(t)
    >>> mprint(u1)
    u1
    """
    pr = MechanicsStrPrinter(settings)
    outstr = pr.doprint(expr)
    # Python 2 module: mimic the interactive interpreter by storing the
    # last printed (non-None) result in the `_` builtin.
    import __builtin__
    if (outstr != 'None'):
        __builtin__._ = outstr
        print(outstr)
def mpprint(expr, **settings):
    r"""Function for pretty printing of expressions generated in mechanics.
    Mainly used for expressions not inside a vector; the output of running
    scripts and generating equations of motion. Takes the same options as
    SymPy's pretty_print(); see that function for more information.
    Parameters
    ==========
    expr : valid sympy object
        SymPy expression to pretty print
    settings : args
        Same as pretty print
    Examples
    ========
    Use in the same way as pprint
    """
    # Delegate to the mechanics-aware pretty printer.
    mp = MechanicsPrettyPrinter(settings)
    print(mp.doprint(expr))
def mlatex(expr, **settings):
    r"""Function for printing latex representation of mechanics objects.
    For latex representation of Vectors, Dyadics, and dynamicsymbols. Takes the
    same options as SymPy's latex(); see that function for more information;
    Parameters
    ==========
    expr : valid sympy object
        SymPy expression to represent in LaTeX form
    settings : args
        Same as latex()
    Examples
    ========
    >>> from sympy.physics.mechanics import mlatex, ReferenceFrame, dynamicsymbols
    >>> N = ReferenceFrame('N')
    >>> q1, q2 = dynamicsymbols('q1 q2')
    >>> q1d, q2d = dynamicsymbols('q1 q2', 1)
    >>> q1dd, q2dd = dynamicsymbols('q1 q2', 2)
    >>> mlatex(N.x + N.y)
    '\\mathbf{\\hat{n}_x} + \\mathbf{\\hat{n}_y}'
    >>> mlatex(q1 + q2)
    'q_{1} + q_{2}'
    >>> mlatex(q1d)
    '\\dot{q}_{1}'
    >>> mlatex(q1 * q2d)
    'q_{1} \\dot{q}_{2}'
    >>> mlatex(q1dd * q1 / q1d)
    '\\frac{q_{1} \\ddot{q}_{1}}{\\dot{q}_{1}}'
    """
    # Delegate to the mechanics-aware LaTeX printer.
    return MechanicsLatexPrinter(settings).doprint(expr)
def kinematic_equations(speeds, coords, rot_type, rot_order=''):
    """Gives equations relating the qdot's to u's for a rotation type.
    Supply rotation type and order as in orient. Speeds are assumed to be
    body-fixed; if we are defining the orientation of B in A using by rot_type,
    the angular velocity of B in A is assumed to be in the form: speed[0]*B.x +
    speed[1]*B.y + speed[2]*B.z
    Parameters
    ==========
    speeds : list of length 3
        The body fixed angular velocity measure numbers.
    coords : list of length 3 or 4
        The coordinates used to define the orientation of the two frames.
    rot_type : str
        The type of rotation used to create the equations. Body, Space, or
        Quaternion only
    rot_order : str
        If applicable, the order of a series of rotations.
    Examples
    ========
    >>> from sympy.physics.mechanics import dynamicsymbols
    >>> from sympy.physics.mechanics import kinematic_equations, mprint
    >>> u1, u2, u3 = dynamicsymbols('u1 u2 u3')
    >>> q1, q2, q3 = dynamicsymbols('q1 q2 q3')
    >>> mprint(kinematic_equations([u1,u2,u3], [q1,q2,q3], 'body', '313'),
    ... order=None)
    [-(u1*sin(q3) + u2*cos(q3))/sin(q2) + q1', -u1*cos(q3) + u2*sin(q3) + q2', (u1*sin(q3) + u2*cos(q3))*cos(q2)/sin(q2) - u3 + q3']
    """
    # Code below is checking and sanitizing input
    approved_orders = ('123', '231', '312', '132', '213', '321', '121', '131',
                       '212', '232', '313', '323', '1', '2', '3', '')
    rot_order = str(rot_order).upper()  # Now we need to make sure XYZ = 123
    rot_type = rot_type.upper()
    # Normalize letter axes to digits (string iterates per character).
    rot_order = [i.replace('X', '1') for i in rot_order]
    rot_order = [i.replace('Y', '2') for i in rot_order]
    rot_order = [i.replace('Z', '3') for i in rot_order]
    rot_order = ''.join(rot_order)
    if not isinstance(speeds, (list, tuple)):
        raise TypeError('Need to supply speeds in a list')
    if len(speeds) != 3:
        raise TypeError('Need to supply 3 body-fixed speeds')
    if not isinstance(coords, (list, tuple)):
        raise TypeError('Need to supply coordinates in a list')
    if rot_type.lower() in ['body', 'space']:
        if rot_order not in approved_orders:
            raise ValueError('Not an acceptable rotation order')
        if len(coords) != 3:
            raise ValueError('Need 3 coordinates for body or space')
        # Actual hard-coded kinematic differential equations
        # Each branch returns the residuals [f1, f2, f3] such that the
        # kinematic ODEs are f_i = 0, for one specific Euler-angle sequence.
        q1, q2, q3 = coords
        q1d, q2d, q3d = [diff(i, dynamicsymbols._t) for i in coords]
        w1, w2, w3 = speeds
        s1, s2, s3 = [sin(q1), sin(q2), sin(q3)]
        c1, c2, c3 = [cos(q1), cos(q2), cos(q3)]
        if rot_type.lower() == 'body':
            if rot_order == '123':
                return [q1d - (w1 * c3 - w2 * s3) / c2, q2d - w1 * s3 - w2 *
                        c3, q3d - (-w1 * c3 + w2 * s3) * s2 / c2 - w3]
            if rot_order == '231':
                return [q1d - (w2 * c3 - w3 * s3) / c2, q2d - w2 * s3 - w3 *
                        c3, q3d - w1 - (- w2 * c3 + w3 * s3) * s2 / c2]
            if rot_order == '312':
                return [q1d - (-w1 * s3 + w3 * c3) / c2, q2d - w1 * c3 - w3 *
                        s3, q3d - (w1 * s3 - w3 * c3) * s2 / c2 - w2]
            if rot_order == '132':
                return [q1d - (w1 * c3 + w3 * s3) / c2, q2d + w1 * s3 - w3 *
                        c3, q3d - (w1 * c3 + w3 * s3) * s2 / c2 - w2]
            if rot_order == '213':
                return [q1d - (w1 * s3 + w2 * c3) / c2, q2d - w1 * c3 + w2 *
                        s3, q3d - (w1 * s3 + w2 * c3) * s2 / c2 - w3]
            if rot_order == '321':
                return [q1d - (w2 * s3 + w3 * c3) / c2, q2d - w2 * c3 + w3 *
                        s3, q3d - w1 - (w2 * s3 + w3 * c3) * s2 / c2]
            if rot_order == '121':
                return [q1d - (w2 * s3 + w3 * c3) / s2, q2d - w2 * c3 + w3 *
                        s3, q3d - w1 + (w2 * s3 + w3 * c3) * c2 / s2]
            if rot_order == '131':
                return [q1d - (-w2 * c3 + w3 * s3) / s2, q2d - w2 * s3 - w3 *
                        c3, q3d - w1 - (w2 * c3 - w3 * s3) * c2 / s2]
            if rot_order == '212':
                return [q1d - (w1 * s3 - w3 * c3) / s2, q2d - w1 * c3 - w3 *
                        s3, q3d - (-w1 * s3 + w3 * c3) * c2 / s2 - w2]
            if rot_order == '232':
                return [q1d - (w1 * c3 + w3 * s3) / s2, q2d + w1 * s3 - w3 *
                        c3, q3d + (w1 * c3 + w3 * s3) * c2 / s2 - w2]
            if rot_order == '313':
                return [q1d - (w1 * s3 + w2 * c3) / s2, q2d - w1 * c3 + w2 *
                        s3, q3d + (w1 * s3 + w2 * c3) * c2 / s2 - w3]
            if rot_order == '323':
                return [q1d - (-w1 * c3 + w2 * s3) / s2, q2d - w1 * s3 - w2 *
                        c3, q3d - (w1 * c3 - w2 * s3) * c2 / s2 - w3]
        if rot_type.lower() == 'space':
            if rot_order == '123':
                return [q1d - w1 - (w2 * s1 + w3 * c1) * s2 / c2, q2d - w2 *
                        c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / c2]
            if rot_order == '231':
                return [q1d - (w1 * c1 + w3 * s1) * s2 / c2 - w2, q2d + w1 *
                        s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / c2]
            if rot_order == '312':
                return [q1d - (w1 * s1 + w2 * c1) * s2 / c2 - w3, q2d - w1 *
                        c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / c2]
            if rot_order == '132':
                return [q1d - w1 - (-w2 * c1 + w3 * s1) * s2 / c2, q2d - w2 *
                        s1 - w3 * c1, q3d - (w2 * c1 - w3 * s1) / c2]
            if rot_order == '213':
                return [q1d - (w1 * s1 - w3 * c1) * s2 / c2 - w2, q2d - w1 *
                        c1 - w3 * s1, q3d - (-w1 * s1 + w3 * c1) / c2]
            if rot_order == '321':
                return [q1d - (-w1 * c1 + w2 * s1) * s2 / c2 - w3, q2d - w1 *
                        s1 - w2 * c1, q3d - (w1 * c1 - w2 * s1) / c2]
            if rot_order == '121':
                return [q1d - w1 + (w2 * s1 + w3 * c1) * c2 / s2, q2d - w2 *
                        c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / s2]
            if rot_order == '131':
                return [q1d - w1 - (w2 * c1 - w3 * s1) * c2 / s2, q2d - w2 *
                        s1 - w3 * c1, q3d - (-w2 * c1 + w3 * s1) / s2]
            if rot_order == '212':
                return [q1d - (-w1 * s1 + w3 * c1) * c2 / s2 - w2, q2d - w1 *
                        c1 - w3 * s1, q3d - (w1 * s1 - w3 * c1) / s2]
            if rot_order == '232':
                return [q1d + (w1 * c1 + w3 * s1) * c2 / s2 - w2, q2d + w1 *
                        s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / s2]
            if rot_order == '313':
                return [q1d + (w1 * s1 + w2 * c1) * c2 / s2 - w3, q2d - w1 *
                        c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / s2]
            if rot_order == '323':
                return [q1d - (w1 * c1 - w2 * s1) * c2 / s2 - w3, q2d - w1 *
                        s1 - w2 * c1, q3d - (-w1 * c1 + w2 * s1) / s2]
    elif rot_type.lower() == 'quaternion':
        if rot_order != '':
            raise ValueError('Cannot have rotation order for quaternion')
        if len(coords) != 4:
            raise ValueError('Need 4 coordinates for quaternion')
        # Actual hard-coded kinematic differential equations
        e0, e1, e2, e3 = coords
        w = Matrix(speeds + [0])
        E = Matrix([[e0, -e3, e2, e1], [e3, e0, -e1, e2], [-e2, e1, e0, e3],
                    [-e1, -e2, -e3, e0]])
        # NOTE(review): edots is ordered [e1', e2', e3', e0'] to match the
        # column ordering of E above.
        edots = Matrix([diff(i, dynamicsymbols._t) for i in [e1, e2, e3, e0]])
        return list(edots.T - 0.5 * w.T * E.T)
    else:
        raise ValueError('Not an approved rotation type for this function')
def partial_velocity(vel_list, u_list, frame):
    """Compute partial velocities with respect to generalized speeds.

    Every (angular) velocity vector in ``vel_list`` is differentiated with
    respect to each generalized speed in ``u_list``, in the given
    ReferenceFrame.  The result is one inner list per velocity vector,
    holding its partial derivative for each speed in turn.

    Parameters
    ==========
    vel_list : list
        List of velocities of Point's and angular velocities of ReferenceFrame's
    u_list : list
        List of independent generalized speeds.
    frame : ReferenceFrame
        The ReferenceFrame the partial derivatives are going to be taken in.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, ReferenceFrame
    >>> from sympy.physics.mechanics import dynamicsymbols
    >>> from sympy.physics.mechanics import partial_velocity
    >>> u = dynamicsymbols('u')
    >>> N = ReferenceFrame('N')
    >>> P = Point('P')
    >>> P.set_vel(N, u * N.x)
    >>> vel_list = [P.vel(N)]
    >>> u_list = [u]
    >>> partial_velocity(vel_list, u_list, N)
    [[N.x]]
    """
    if not hasattr(vel_list, '__iter__'):
        raise TypeError('Provide velocities in an iterable')
    if not hasattr(u_list, '__iter__'):
        raise TypeError('Provide speeds in an iterable')
    # Nested comprehension: one row per velocity, one column per speed.
    return [[velocity.diff(speed, frame) for speed in u_list]
            for velocity in vel_list]
def linear_momentum(frame, *body):
    """Linear momentum of the system.

    The linear momentum of a system of Particle's and/or RigidBody's is
    the vector sum of the linear momenta of its constituents: for a system
    S made of a rigid body A and a particle P, L = L1 + L2.

    Parameters
    ==========
    frame : ReferenceFrame
        The frame in which linear momentum is desired.
    body1, body2, body3... : Particle and/or RigidBody
        The body (or bodies) whose linear momentum is required.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
    >>> from sympy.physics.mechanics import RigidBody, outer, linear_momentum
    >>> N = ReferenceFrame('N')
    >>> P = Point('P')
    >>> P.set_vel(N, 10 * N.x)
    >>> Pa = Particle('Pa', P, 1)
    >>> Ac = Point('Ac')
    >>> Ac.set_vel(N, 25 * N.y)
    >>> I = outer(N.x, N.x)
    >>> A = RigidBody('A', Ac, N, 20, (I, Ac))
    >>> linear_momentum(N, A, Pa)
    10*N.x + 500*N.y
    """
    if not isinstance(frame, ReferenceFrame):
        raise TypeError('Please specify a valid ReferenceFrame')
    total = S(0)
    for entity in body:
        # Reject anything that is neither a particle nor a rigid body.
        if not isinstance(entity, (RigidBody, Particle)):
            raise TypeError('*body must have only Particle or RigidBody')
        total += entity.linear_momentum(frame)
    return total
def angular_momentum(point, frame, *body):
    """Angular momentum of a system.

    The angular momentum of a system of Particle's and/or RigidBody's about
    a point is the vector sum of the angular momenta of its constituents:
    for a system S made of a rigid body A and a particle P, H = H1 + H2.

    Parameters
    ==========
    point : Point
        The point about which angular momentum of the system is desired.
    frame : ReferenceFrame
        The frame in which angular momentum is desired.
    body1, body2, body3... : Particle and/or RigidBody
        The body (or bodies) whose angular momentum is required.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
    >>> from sympy.physics.mechanics import RigidBody, outer, angular_momentum
    >>> N = ReferenceFrame('N')
    >>> O = Point('O')
    >>> O.set_vel(N, 0 * N.x)
    >>> P = O.locatenew('P', 1 * N.x)
    >>> P.set_vel(N, 10 * N.x)
    >>> Pa = Particle('Pa', P, 1)
    >>> Ac = O.locatenew('Ac', 2 * N.y)
    >>> Ac.set_vel(N, 5 * N.y)
    >>> a = ReferenceFrame('a')
    >>> a.set_ang_vel(N, 10 * N.z)
    >>> I = outer(N.z, N.z)
    >>> A = RigidBody('A', Ac, a, 20, (I, Ac))
    >>> angular_momentum(O, N, Pa, A)
    10*N.z
    """
    if not isinstance(frame, ReferenceFrame):
        raise TypeError('Please enter a valid ReferenceFrame')
    if not isinstance(point, Point):
        raise TypeError('Please specify a valid Point')
    total = S(0)
    for entity in body:
        if not isinstance(entity, (RigidBody, Particle)):
            raise TypeError('*body must have only Particle or RigidBody')
        total += entity.angular_momentum(point, frame)
    return total
def kinetic_energy(frame, *body):
    """Kinetic energy of a multibody system.

    The kinetic energy of a system of Particle's and/or RigidBody's is the
    scalar sum of the kinetic energies of its constituents: for a system S
    made of a rigid body A and a particle P, T = T1 + T2.

    Parameters
    ==========
    frame : ReferenceFrame
        The frame in which the velocity or angular velocity of the body is
        defined.
    body1, body2, body3... : Particle and/or RigidBody
        The body (or bodies) whose kinetic energy is required.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
    >>> from sympy.physics.mechanics import RigidBody, outer, kinetic_energy
    >>> N = ReferenceFrame('N')
    >>> O = Point('O')
    >>> O.set_vel(N, 0 * N.x)
    >>> P = O.locatenew('P', 1 * N.x)
    >>> P.set_vel(N, 10 * N.x)
    >>> Pa = Particle('Pa', P, 1)
    >>> Ac = O.locatenew('Ac', 2 * N.y)
    >>> Ac.set_vel(N, 5 * N.y)
    >>> a = ReferenceFrame('a')
    >>> a.set_ang_vel(N, 10 * N.z)
    >>> I = outer(N.z, N.z)
    >>> A = RigidBody('A', Ac, a, 20, (I, Ac))
    >>> kinetic_energy(N, Pa, A)
    350
    """
    if not isinstance(frame, ReferenceFrame):
        raise TypeError('Please enter a valid ReferenceFrame')
    total = S(0)
    for entity in body:
        if not isinstance(entity, (RigidBody, Particle)):
            raise TypeError('*body must have only Particle or RigidBody')
        total += entity.kinetic_energy(frame)
    return total
def potential_energy(*body):
    """Potential energy of a multibody system.

    The potential energy of a system of Particle's and/or RigidBody's is
    the scalar sum of the potential energies of its constituents: for a
    system S made of a rigid body A and a particle P, V = V1 + V2.

    Parameters
    ==========
    body1, body2, body3... : Particle and/or RigidBody
        The body (or bodies) whose potential energy is required.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
    >>> from sympy.physics.mechanics import RigidBody, outer, potential_energy
    >>> from sympy import symbols
    >>> M, m, g, h = symbols('M m g h')
    >>> N = ReferenceFrame('N')
    >>> O = Point('O')
    >>> O.set_vel(N, 0 * N.x)
    >>> P = O.locatenew('P', 1 * N.x)
    >>> Pa = Particle('Pa', P, m)
    >>> Ac = O.locatenew('Ac', 2 * N.y)
    >>> a = ReferenceFrame('a')
    >>> I = outer(N.z, N.z)
    >>> A = RigidBody('A', Ac, a, M, (I, Ac))
    >>> Pa.set_potential_energy(m * g * h)
    >>> A.set_potential_energy(M * g * h)
    >>> potential_energy(Pa, A)
    M*g*h + g*h*m
    """
    total = S(0)
    for entity in body:
        if not isinstance(entity, (RigidBody, Particle)):
            raise TypeError('*body must have only Particle or RigidBody')
        total += entity.potential_energy
    return total
def Lagrangian(frame, *body):
    """Lagrangian of a multibody system.

    For a system with kinetic energy T and potential energy V, the
    Lagrangian is the scalar L = T - V, summed over every supplied
    Particle and RigidBody.

    Parameters
    ==========
    frame : ReferenceFrame
        The frame in which the velocity or angular velocity of the body is
        defined to determine the kinetic energy.
    body1, body2, body3... : Particle and/or RigidBody
        The body (or bodies) whose Lagrangian is required.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
    >>> from sympy.physics.mechanics import RigidBody, outer, Lagrangian
    >>> from sympy import symbols
    >>> M, m, g, h = symbols('M m g h')
    >>> N = ReferenceFrame('N')
    >>> O = Point('O')
    >>> O.set_vel(N, 0 * N.x)
    >>> P = O.locatenew('P', 1 * N.x)
    >>> P.set_vel(N, 10 * N.x)
    >>> Pa = Particle('Pa', P, 1)
    >>> Ac = O.locatenew('Ac', 2 * N.y)
    >>> Ac.set_vel(N, 5 * N.y)
    >>> a = ReferenceFrame('a')
    >>> a.set_ang_vel(N, 10 * N.z)
    >>> I = outer(N.z, N.z)
    >>> A = RigidBody('A', Ac, a, 20, (I, Ac))
    >>> Pa.set_potential_energy(m * g * h)
    >>> A.set_potential_energy(M * g * h)
    >>> Lagrangian(N, Pa, A)
    -M*g*h - g*h*m + 350
    """
    if not isinstance(frame, ReferenceFrame):
        raise TypeError('Please supply a valid ReferenceFrame')
    # Validate every body up front; the helpers below would also raise,
    # but this keeps the error consistent with the sibling functions.
    if any(not isinstance(entity, (RigidBody, Particle)) for entity in body):
        raise TypeError('*body must have only Particle or RigidBody')
    return kinetic_energy(frame, *body) - potential_energy(*body)
| {
"repo_name": "amitjamadagni/sympy",
"path": "sympy/physics/mechanics/functions.py",
"copies": "2",
"size": "26581",
"license": "bsd-3-clause",
"hash": 4707582206880305000,
"line_mean": 34.2066225166,
"line_max": 132,
"alpha_frac": 0.5553590911,
"autogenerated": false,
"ratio": 3.213758916696893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47691180077968925,
"avg_score": null,
"num_lines": null
} |
# Module-level setup: activate the bundled virtualenv, then restore the
# persisted Hue bridge registry from LaunchBar's support directory.
__all__ = ('current', 'get')
import os.path
# Run the virtualenv's activate_this.py so third-party modules bundled with
# the action (e.g. requests) become importable.
activate_this = os.path.join(os.path.dirname(__file__), 'bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
import json, os, requests, sys
from urlparse import urljoin
# Registry file: {'all': {serial: info}, 'current': serial (optional)}.
bridges_path = os.path.join(os.getenv('LB_SUPPORT_PATH'), 'bridges.json')
bridges = None
try:
    if os.path.exists(bridges_path):
        bridges = json.load(file(bridges_path))
except Exception, e:
    # Corrupt or unreadable registry: warn and fall through to a fresh one.
    print >> sys.stderr, 'Error restoring bridges:', e
if not bridges:
    bridges = dict(all={})
# XXX automatically handle bridge changed IP address?
class HueException(Exception):
    """Raised when the Hue bridge API reports one or more error dicts."""

    # Hue API error type returned while the bridge's link button is unpressed.
    LINK_BUTTON_NOT_PRESSED = 101

    def has_type(self, type_id):
        """Return True if any attached error dict has the given numeric type."""
        wanted = int(type_id)
        for error in self.args:
            if int(error['type']) == wanted:
                return True
        return False

    def __str__(self):
        descriptions = [error['description'] for error in self.args]
        return '\n'.join(descriptions)
class HueAPI(object):
    """Proxy exposing the Hue REST API as attribute/item chains.

    Attribute or item access builds a URL path lazily (e.g.
    ``bridge.lights['1'].state``); calling the resulting object performs
    the HTTP request.  Keyword arguments become the JSON body and switch
    the default method from GET to PUT.
    """
    __slots__ = ('url', 'bridge')
    def __init__(self, bridge, url):
        self.bridge = bridge
        self.url = url
    def _make_request(self, method=None, *args, **kw):
        """Issue the HTTP request for this node and return the parsed JSON.

        Raises HueException when the bridge reports API-level errors, and
        exits the process (exit_with_connection_error) when unreachable.
        """
        url = self.url
        if self.bridge is not self:
            # Child nodes keep a relative path; resolve against the bridge root.
            url = urljoin(self.bridge.url, url)
        if args:
            # Extra positional arguments extend the URL path.
            url = urljoin(url, '/'.join(map(str, args)))
        # print url
        if method is None:
            # A JSON body implies a state change -> PUT; otherwise read -> GET.
            method = 'PUT' if kw else 'GET'
        try:
            if kw:
                response = requests.request(method, url, json=kw, timeout=2)
            else:
                response = requests.request(method, url, timeout=2)
        except requests.ConnectionError, e:
            print >> sys.stderr, e
            exit_with_connection_error()
        response.raise_for_status()
        response_json = response.json()
        # print response_json
        if type(response_json) is list:
            # The bridge answers with a list of {'error': ...}/{'success': ...}
            # dicts; surface all errors at once as a single HueException.
            errors = [r['error'] for r in response_json
                      if type(r) is dict and 'error' in r]
            if errors:
                raise HueException(*errors)
        return response_json
    def __call__(self, **kw):
        return self._make_request(**kw)
    def __delattr__(self, attr):
        # Deleting an attribute maps to an HTTP DELETE on the child path.
        return self._make_request('DELETE', attr)
    __delitem__ = __delattr__
    def __getattr__(self, attr):
        # Build a child proxy one path segment deeper (shares the bridge root).
        url = str(attr) + '/'
        if self.bridge is not self:
            url = urljoin(self.url, url)
        return HueAPI(self.bridge, url)
    __getitem__ = __getattr__
    def __repr__(self):
        if self.bridge is self:
            return '<%s: %s>' % (self.__class__.__name__, self.url)
        return '<%s: %s | %s>' % (self.__class__.__name__,
                                  self.bridge.url, self.url)
    __str__ = __repr__
class HueBridge(HueAPI):
    """A physical Hue bridge: the API root plus pairing/selection state."""
    __slots__ = ('serial', 'info',)
    def __init__(self, serial, info):
        self.serial = serial
        self.info = info
        # The bridge is its own API root (see HueAPI._make_request).
        self.bridge = self
        self._update_url()
    @property
    def modelid(self): return self.info['modelid']
    @property
    def name(self): return self.info['name']
    @property
    def icon(self):
        """Icon file name matching hardware generation and link state."""
        # Model id 929000226503 is mapped to the v1 icon set.
        version = '1' if self.modelid == '929000226503' else '2'
        return ('bridge_v' + version if self.linked
                else 'pushlink_bridgev' + version) + '.pdf'
    @property
    def linked(self):
        """True when we hold a username the bridge still accepts."""
        if 'username' not in self.info:
            return False
        if 'whitelist' not in self.config():
            # Bridge no longer recognizes our user: drop the stale credential.
            del self.info['username']
            save()
            return False
        return True
    def link(self):
        """Attempt to pair; return False if the link button wasn't pressed."""
        import subprocess
        # Device name sent to the bridge is truncated to 19 characters.
        computer_name = subprocess.check_output(
            ['/usr/sbin/scutil', '--get', 'ComputerName']).rstrip()[:19]
        try:
            user = self._make_request(method='POST',
                                      devicetype='LBHue#' + computer_name)
        except HueException, e:
            if e.has_type(HueException.LINK_BUTTON_NOT_PRESSED):
                return False
            # NOTE(review): any other HueException falls through with `user`
            # unbound, raising NameError below -- confirm intended handling.
        self.info['username'] = user[0]['success']['username']
        save()
        self._update_url()
        return True
    def make_current(self):
        """Remember this bridge as the default for future invocations."""
        bridges['current'] = self.serial
        save()
    def _update_url(self):
        # Authenticated requests are rooted at /api/<username>/.
        username = self.info.get('username')
        if username:
            self.url = urljoin(self.info['url'], '/api/%s/' % username)
        else:
            self.url = urljoin(self.info['url'], '/api/')
class NoBridge(object):
    """Null-object stand-in used when no Hue bridge is configured.

    Any attribute or item access yields the same object, calling it
    returns an empty dict, and it tests falsy under Python 2.
    """

    def __call__(self, **kw):
        # Swallow any keyword arguments and report an empty API response.
        return {}

    def __nonzero__(self):
        # Python 2 truth hook: a missing bridge is always falsy.
        return False

    def __getattr__(self, attr):
        # Chain indefinitely: no_bridge.lights['1'].state is still this object.
        return self

    __getitem__ = __getattr__
# Serial numbers (from description.xml) are 12 hex digits.
# Bridge IDs (in Hue or N-UPnP API) are 16 hex digits, consisting of:
# (first 6 digits of serial number) + fffe + (last 6 digits of serial number)
# This is not documented anywhere, but be careful not to confuse the two.
def get(serial, url=None):
    """Return a HueBridge for *serial*, or None when it is unknown.

    When *url* is supplied the bridge's stored info is created/updated and
    its configuration (modelid, name) is refreshed from the live bridge.
    """
    serial = serial.upper()
    # Serial numbers from description.xml are always 12 hex digits.  The
    # original used `assert`, which is silently stripped under `python -O`;
    # raise explicitly so bad input always fails loudly.
    if len(serial) != 12:
        raise ValueError('bridge serial must be 12 characters: %r' % serial)
    bridge_info = bridges['all'].get(serial, {})
    if url:
        bridge_info.update(url=url)
        bridges['all'][serial] = bridge_info
    elif not bridge_info:
        # Unknown serial and no URL to reach it: nothing we can do.
        return
    bridge = HueBridge(serial, bridge_info)
    if url:  # refresh cached configuration from the live bridge
        config = bridge.config()
        bridge_info.update(modelid=config['modelid'], name=config['name'])
        save()
    return bridge
# unique HueBridges?
def current():
    """Return the currently selected HueBridge, or a NoBridge placeholder."""
    serial = bridges.get('current')
    return get(serial) if serial else NoBridge()
def save():
    """Persist the in-memory bridge registry to bridges.json."""
    # Context manager ensures the handle is flushed and closed even if
    # json.dump raises; the original leaked the handle opened via the
    # Python 2 `file()` builtin.
    with open(bridges_path, 'w') as f:
        json.dump(bridges, f)
# If we get a connection error, offer to relink a bridge
def exit_with_connection_error():
    """Emit a LaunchBar item offering to relink the bridge, then exit."""
    import discover
    item = discover.discover_item('Unable to connect. Relink bridge?')
    # The JSON item on stdout is the script's result for LaunchBar.
    print json.dumps(item)
    sys.exit(0)
| {
"repo_name": "nriley/LBHue",
"path": "Hue.lbaction/Contents/Scripts/bridges.py",
"copies": "1",
"size": "5849",
"license": "apache-2.0",
"hash": -8407444623702303000,
"line_mean": 26.2046511628,
"line_max": 79,
"alpha_frac": 0.5677893657,
"autogenerated": false,
"ratio": 3.746957078795644,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.980264533874804,
"avg_score": 0.0024202211495209748,
"num_lines": 215
} |
"""All data and feature processing functions and routines"""
__author__ = "Gabriel Urbain"
__copyright__ = "Copyright 2017, Gabriel Urbain"
__license__ = "MIT"
__version__ = "0.2"
__maintainer__ = "Gabriel Urbain"
__email__ = "gabriel.urbain@ugent.be"
__status__ = "Research"
__date__ = "September 1st, 2017"
import analysis
import data
import utils
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import os
import numpy as np
import sys
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import text, DictVectorizer
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler
# HACK: reload() restores the setdefaultencoding attribute removed by
# site.py; forcing UTF-8 process-wide is a Python 2 workaround for
# unicode errors on mixed str/unicode text.
reload(sys)
sys.setdefaultencoding('utf8')
# Directory where pickled feature matrices and plots are written.
DEFAULT_FT_LOCATION = 'Features'
# Train/validation split ratios.
TRAINING_PART = 0.95
VALIDATE_PART = 1 - TRAINING_PART
# Fixed seeds for reproducibility.
RANDOM = 42
SEED = 659
class ItemSelector(BaseEstimator, TransformerMixin):
    """
    Pipeline transformer that extracts one field from every review.

    key selects which attribute is pulled from each record ("content",
    "product" or "author"); typ selects the output shape: "list" yields a
    plain list of values (for text vectorizers), "dict" yields one-hot
    style single-entry dicts (for DictVectorizer).
    """
    def __init__(self, key, typ="list"):
        assert (typ in ["list", "dict"]), "Item can only return list of dict types!"
        assert (key in ["content", "product", "author"]), "Available keys are content, product, author!"
        self.key = key
        self.typ = typ
    def fit(self, x, y=None):
        # Stateless: nothing to learn.
        return self
    def transform(self, dataset):
        # assumes each record exposes .content/.product/.author attributes
        # -- TODO confirm against the data module's review type.
        if self.typ == "list":
            liste = []
            if self.key == "content":
                liste = [r.content for r in dataset]
            elif self.key == "author":
                liste = [r.author for r in dataset]
            elif self.key == "product":
                liste = [r.product for r in dataset]
            return liste
        elif self.typ == "dict":
            # {value: 1} dicts feed DictVectorizer as one-hot indicators.
            dictionary = []
            if self.key == "content":
                dictionary = [{r.content: 1} for r in dataset]
            elif self.key == "author":
                dictionary = [{r.author: 1} for r in dataset]
            elif self.key == "product":
                dictionary = [{r.product: 1} for r in dataset]
            return dictionary
        else:
            # Unreachable in practice: __init__ already asserts typ.
            print "Type error in ItemSelector!"
            return
class Float2Labels(BaseEstimator, TransformerMixin):
    """Transformer that clips predictions to [min_r, max_r] and rounds
    them to the nearest whole-number label."""

    def __init__(self, min_r=0, max_r=5):
        # Inclusive bounds of the valid rating range.
        self.min_r = min_r
        self.max_r = max_r

    def transform(self, X, *_):
        """Return X clipped to [min_r, max_r], rounded to whole numbers.

        Uses np.clip instead of in-place boolean-mask assignment, so the
        caller's array is no longer mutated as a side effect, and plain
        Python sequences are accepted as well as ndarrays.
        """
        return np.rint(np.clip(X, self.min_r, self.max_r))

    def fit(self, *_):
        # Stateless transformer: nothing to learn.
        return self
def loss_fct(truth, prediction):
    """Mean absolute error between the target and the prediction arrays."""
    abs_errors = np.abs(truth - prediction)
    return float(np.sum(abs_errors)) / abs_errors.size
def create_target(dataset):
    """Collect the rating of every review in *dataset* into a numpy array."""
    return np.array([review.rating for review in dataset])
def create_ft_ct_pd_au(ngram=3, max_df=0.3, min_df=0.0003, w_ct=1, w_pd=1, w_au=1):
    """Build the combined feature pipeline: tf-idf n-grams over review
    content plus one-hot product and author indicators, weighted per branch.
    """
    stop_words = text.ENGLISH_STOP_WORDS.union(["s", "t", "2", "m", "ve"])
    # Branch 1: review text -> tf-idf over 1..ngram token n-grams.
    content_branch = Pipeline([
        ('selector_ct', ItemSelector(key='content')),
        ('content_ft', text.TfidfVectorizer(
            tokenizer=analysis.get_text_ngram, ngram_range=(1, ngram),
            min_df=min_df, max_df=max_df, stop_words=stop_words)),
    ])
    # Branch 2: product id -> one-hot.
    product_branch = Pipeline([
        ('selector_pd', ItemSelector(key='product', typ="dict")),
        ('product_ft', DictVectorizer()),
    ])
    # Branch 3: author id -> one-hot.
    author_branch = Pipeline([
        ('selector_au', ItemSelector(key='author', typ="dict")),
        ('author_ft', DictVectorizer()),
    ])
    union = FeatureUnion(
        transformer_list=[('content', content_branch),
                          ('product', product_branch),
                          ('author', author_branch)],
        transformer_weights={'content': w_ct, 'product': w_pd, 'author': w_au})
    return Pipeline([('ft_extractor', union)])
def create_ft_ctsvd_pd_au(ngram=3, k=10000, max_df=0.3, min_df=0.0003, w_ct=1, w_pd=1, w_au=1):
    """Same as create_ft_ct_pd_au, but the content tf-idf branch is reduced
    to *k* components with truncated SVD before being unioned.
    """
    stop_words = text.ENGLISH_STOP_WORDS.union(["s", "t", "2", "m", "ve"])
    # Content branch: tf-idf followed by dimensionality reduction.
    content_branch = Pipeline([
        ('selector_ct', ItemSelector(key='content')),
        ('ft', text.TfidfVectorizer(
            tokenizer=analysis.get_text_ngram, ngram_range=(1, ngram),
            min_df=min_df, max_df=max_df, stop_words=stop_words)),
        ('reductor', TruncatedSVD(n_components=int(k))),
    ])
    product_branch = Pipeline([
        ('selector_pd', ItemSelector(key='product', typ="dict")),
        ('ft', DictVectorizer()),
    ])
    author_branch = Pipeline([
        ('selector_au', ItemSelector(key='author', typ="dict")),
        ('ft', DictVectorizer()),
    ])
    union = FeatureUnion(
        transformer_list=[('content', content_branch),
                          ('product', product_branch),
                          ('author', author_branch)],
        transformer_weights={'content': w_ct, 'product': w_pd, 'author': w_au})
    return Pipeline([('ft_extractor', union)])
def create_ft_ct(ngram=3, max_df=0.3, min_df=0.0003):
    """Content-only feature pipeline: tf-idf over review text n-grams."""
    stop_words = text.ENGLISH_STOP_WORDS.union(["s", "t", "2", "m", "ve"])
    vectorizer = text.TfidfVectorizer(
        tokenizer=analysis.get_text_ngram, ngram_range=(1, ngram),
        min_df=min_df, max_df=max_df, stop_words=stop_words)
    steps = [('selector_ct', ItemSelector(key='content')),
             ('content_ft', vectorizer)]
    return Pipeline(steps)
def save_ft(test_ft, train_ft, target, filename=None):
    """Pickle the feature matrices and target; return the path written."""
    ts = utils.timestamp()
    if filename is None:
        # Default to a timestamped pickle under the features directory.
        filename = os.path.join(DEFAULT_FT_LOCATION, "ft_" + ts + ".pkl")
    utils.dump_pickle((test_ft, train_ft, target), filename)
    return filename
def load_ft(filename):
    """Inverse of save_ft: unpickle and return (test_ft, train_ft, target)."""
    return utils.load_pickle(filename)
def create_ft(svd=True):
    """Build, normalize, pickle and plot the train/test feature matrices.

    With svd=True the combined features are reduced to 1000 SVD components
    and min-max scaled; otherwise the sparse matrix is max-abs scaled.  A
    histogram of the non-zero training feature values is saved alongside.
    """
    # 1. Get the data
    print "1. Load data\n"
    train_set = data.load_pickled_data()['train']
    test_set = data.load_pickled_data()['test']
    target = create_target(train_set)
    # 2. Create the feature matrices
    print "2. Create features"
    if svd:
        ft_pipe = Pipeline([('ft', create_ft_ct_pd_au()), ('red', TruncatedSVD(n_components=1000)),
                            ('norm', MinMaxScaler())])
    else:
        ft_pipe = Pipeline([('ft', create_ft_ct_pd_au()), ('norm', MaxAbsScaler())])
    # Fit vectorizers/scalers on the training set only, then apply to test.
    train_ft = ft_pipe.fit_transform(train_set)
    test_ft = ft_pipe.transform(test_set)
    print "Train features matrix size: " + str(train_ft.shape) + " and target size: " + str(len(target))
    print "Test features matrix size: " + str(test_ft.shape) + "\n"
    # 3. Save features
    print "3. Save features"
    if svd:
        save_ft(test_ft, train_ft, target, filename=DEFAULT_FT_LOCATION + "/ft_svd.pkl")
        # Histogram of non-zero feature values for a quick sanity check.
        r, c = train_ft.nonzero()
        feature_array = train_ft[r, c].flatten().tolist()
        plt.hist(feature_array, 10, alpha=0.75)
        plt.title('Features Histogram')
        plt.tight_layout()
        plt.savefig(DEFAULT_FT_LOCATION + "/histogram_svd.png", format='png', dpi=300)
        plt.close()
    else:
        save_ft(test_ft, train_ft, target, filename=DEFAULT_FT_LOCATION + "/ft_max_scaler.pkl")
        r, c = train_ft.nonzero()
        feature_array = train_ft[r, c].flatten().tolist()
        plt.hist(feature_array, 50, alpha=0.75)
        plt.title('Features Histogram')
        plt.tight_layout()
        plt.savefig(DEFAULT_FT_LOCATION + "/histogram_max_scaler.png", format='png', dpi=300)
        plt.close()
if __name__ == '__main__':
    # CLI entry point: `python preprocessing.py ft` rebuilds the features.
    args = sys.argv
    if args[1] == "ft":
        create_ft()
    else:
        print "Option does not exist. Please, check the preprocessing.py file"
| {
"repo_name": "Gabs48/ML_competition",
"path": "preprocessing.py",
"copies": "1",
"size": "8334",
"license": "mit",
"hash": -878976530397160700,
"line_mean": 31.3023255814,
"line_max": 104,
"alpha_frac": 0.555675546,
"autogenerated": false,
"ratio": 3.623478260869565,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.966546043498484,
"avg_score": 0.0027386743769450216,
"num_lines": 258
} |
"""All database models for this application."""
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from stdimage.models import StdImageField
from django.core.urlresolvers import reverse
from core.managers import GameManager
from core.managers import UserManager
class MyFile(models.Model):
    """An uploaded file with a display name, attachable to games."""
    # Both optional: a row may exist before a file has been uploaded.
    game_file = models.FileField(blank=True, null=True)
    name = models.CharField(max_length=100, null=True)
    def __unicode__(self):
        # Python 2 / legacy Django string representation.
        return self.name
class User(AbstractBaseUser, PermissionsMixin):
    """
    User for this site.
    Requires only an email and password; email is the login identifier.
    """
    GENDER_CHOICES = (('', 'Prefer not to disclose'),
                      ('M', 'Male'),
                      ('F', 'Female'),
                      ('O', 'Other'),)
    email = models.EmailField(blank=False, unique=True)
    first_name = models.CharField(max_length=30, blank=True)
    last_name = models.CharField(max_length=30, blank=True)
    is_staff = models.BooleanField(default=False,
                                   help_text='Designates whether the user can log into this admin '
                                   'site.')
    date_joined = models.DateTimeField(auto_now_add=True)
    birthday = models.DateField(null=True, blank=True)
    gender = models.CharField(max_length=1,
                              blank=True,
                              choices=GENDER_CHOICES)
    public = models.BooleanField(default=True,
                                 help_text='Determines whether or not your profile is open to the public')
    USERNAME_FIELD = 'email'
    objects = UserManager()

    class Meta:
        verbose_name = 'user'
        verbose_name_plural = 'users'

    @property
    def display_name(self):
        """Return the name that should be displayed to the public"""
        if not self.public:
            return 'Anonymous'
        elif self.get_full_name():
            return self.get_full_name()
        else:
            return self.email

    @property
    def has_unread_notifications(self):
        """True when at least one unread notification exists for this user."""
        # Bug fix: the original used ``len(qs) is not 0`` -- an *identity*
        # comparison against an int that only works by accident of CPython's
        # small-integer caching.  An EXISTS query is correct and avoids
        # fetching every row just to count them.
        return UserNotification.objects.filter(user=self, read=False).exists()

    @property
    def notifications(self):
        """Return up to five of this user's notifications, unread first."""
        notifications = UserNotification.objects.filter(user=self).order_by('read')[:5]
        return notifications

    def push_notification(self, description, url):
        """Create and persist a notification for this user pointing at *url*."""
        notification = UserNotification()
        notification.user = self
        notification.description = description
        notification.redirect_url = url
        notification.save()

    def get_full_name(self):
        """Return the first_name plus the last_name, with a space in between."""
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        """Return the short name for the user."""
        return self.first_name

    def can_edit_game(self, game):
        """True if this user belongs to the group that owns *game*."""
        return self in game.group.members.all()
class UserNotification(models.Model):
    """A single notification shown to a user, linking to a target URL."""
    # Destination the user is sent to when following the notification.
    redirect_url = models.URLField()
    user = models.ForeignKey(User)
    description = models.CharField(max_length=256)
    # Flipped to True once the notification has been seen.
    read = models.BooleanField(default=False)
    @property
    def link(self):
        """Site-relative URL for viewing this notification."""
        return reverse('core:profile:notifications', kwargs={'notification_id': self.pk})
    def __unicode__(self):
        return self.description
class Group(models.Model):
    """Groups that can consist of Users."""
    members = models.ManyToManyField(User)
    name = models.CharField(max_length=50)
    def get_games(self):
        """Return all games owned by this group."""
        games = Game.objects.filter(group=self)
        return games
    def get_absolute_url(self):
        """Detail page for a group."""
        return reverse('core:groups-detail', kwargs={'pk': self.pk})
    def push_notification(self, description, url):
        """Fan a notification out to every member of the group."""
        for user in self.members.all():
            notification = UserNotification()
            notification.user = user
            notification.description = description
            notification.redirect_url = url
            notification.save()
    def __unicode__(self):
        return self.name
class GroupInvitation(models.Model):
    """An invitation for a group."""
    user = models.ForeignKey(User, null=False)
    group = models.ForeignKey(Group, null=False)
    inviting = models.BooleanField(null=False, default=True)
    # For the inviting field here:
    # true: this is an invitation from the group to the user
    # false: this is a request from the user to join the group
    def accept(self):
        """Add the user to the group, notify the group, and consume self."""
        self.group.members.add(self.user)
        self.group.save()
        self.group.push_notification('A new user has joined your group!',
                                     reverse('core:groups-detail', kwargs={'pk': self.group.id}))
        self.delete()
    def decline(self):
        """Reject: simply discard the invitation/request."""
        self.delete()
    def valid_user(self, user):
        """Whether *user* may act on this record: the invitee when the group
        is inviting, or any group member when the user is requesting."""
        return ((self.inviting and user == self.user) or
                (not self.inviting and user in self.group.members.all()))
    @classmethod
    def create(cls, group, user, inviting):
        """Create an invitation/request and notify the party that must act."""
        invite = cls(user=user, group=group, inviting=inviting)
        invite.save()
        if inviting:
            invite.user.push_notification('A group has invited you to join.',
                                          reverse('core:invite', kwargs={'pk': invite.id}))
        else:
            invite.group.push_notification('A user has requested to join your group.',
                                           reverse('core:invite', kwargs={'pk': invite.id}))
        return invite
class GameTag(models.Model):
    """Tags to label Games."""
    # The tag text itself.
    value = models.CharField(max_length=50)
    def __unicode__(self):
        return self.value
class Game(models.Model):
    """Game object: a published game owned by a Group."""
    name = models.CharField(max_length=50)
    image = StdImageField(upload_to='game_images', null=True, blank=True,
                          variations={'thumbnail': {'width': 200, 'height': 200}})
    game_file = models.ManyToManyField(MyFile, blank=True)
    description = models.TextField(max_length=5000)
    date_published = models.DateField(auto_now_add=True)
    group = models.ForeignKey(Group, null=False)
    event_name = models.CharField(max_length=75, blank=True, default='')
    # NOTE(review): null=True has no effect on a ManyToManyField in Django.
    tags = models.ManyToManyField(GameTag, null=True, blank=True)
    featured = models.BooleanField(default=False)
    objects = GameManager()
    @property
    def small_description(self):
        """Description truncated to 300 characters for list views."""
        if len(self.description) > 300:
            return self.description[:300] + '...'
        else:
            return self.description
    @property
    def average_rating(self):
        """Mean of all rating values for this game, or 0 when unrated."""
        # values_list('value') yields 1-tuples; zip(*...) transposes them
        # into a single tuple of values (empty list when there are none).
        ratings = zip(*self.gamerating_set.all().values_list('value'))
        if ratings:
            ratings = ratings[0]
            return sum(ratings) / len(ratings)
        else:
            return 0
    @property
    def total_ratings(self):
        """Number of ratings this game has received."""
        ratings = zip(*self.gamerating_set.all().values_list('value'))
        if len(ratings) != 0:
            return len(ratings[0])
        return 0
    def push_notification(self):
        """Notify the owning group that someone commented on this game."""
        if self.group:
            return self.group.push_notification(description='Somebody commented on a game of yours!',
                                                url=reverse('core:games:specific', kwargs={'game_id': self.pk}))
    def __unicode__(self):
        return self.name
class GameRating(models.Model):
    """A single user's rating of a game, in half-star steps from 0.5 to 5."""
    game = models.ForeignKey(Game)
    user = models.ForeignKey(User)
    # Choices restrict values to half-star increments.
    value = models.FloatField(choices=(
        (.5, .5),
        (1, 1),
        (1.5, 1.5),
        (2, 2),
        (2.5, 2.5),
        (3, 3),
        (3.5, 3.5),
        (4, 4),
        (4.5, 4.5),
        (5, 5),
    ))
| {
"repo_name": "joshsamara/game-website",
"path": "core/models.py",
"copies": "1",
"size": "7740",
"license": "mit",
"hash": 7531074596095337000,
"line_mean": 31.3849372385,
"line_max": 112,
"alpha_frac": 0.6027131783,
"autogenerated": false,
"ratio": 4.1257995735607675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5228512751860768,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Database']
import os
import sys
from collections import OrderedDict
from .table import Table
from .column import Column
class Database(object):
    """A named database inside a Store: a directory holding table files.

    Usable as a context manager; entering opens the database and exiting
    closes it (and all tables opened through it).
    """
    def __init__(self, store, db_name):
        self.store = store
        self.db_name = db_name
        self.opened = False
        self.tables = []
        # database dir: create it on first use.
        dirpath = self.get_path()
        if not os.path.exists(dirpath):
            try:
                os.makedirs(dirpath)
            except OSError as e:
                raise Exception('could not create database %r' % db_name)
    def __enter__(self):
        # open self if not
        if not self.is_opened():
            self.open()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        if self.is_opened():
            self.close()
        # Returning False propagates any exception from the with-block.
        return False
    def get_path(self):
        """Filesystem directory backing this database."""
        return os.path.join(self.store.get_path(), self.db_name)
    def is_opened(self):
        return self.opened
    def open(self):
        """Mark the database opened; currently only lists the table dir."""
        # database dir
        dirpath = self.get_path()
        for table_name in os.listdir(dirpath):
            print 'table_name:', table_name
        self.opened = True
    def close(self):
        """Close every opened table, then mark the database closed."""
        for t in self.tables:
            if t.is_opened():
                t.close()
        self.opened = False
    def table(self, table_name, **_type_fields):
        """Open (or define) a table.

        Keyword arguments describe columns: 'bool', 'int', 'float', 'str'
        or 'str[N]' (fixed size N).  The special 'primary_key' keyword is
        required when any columns are given and lists the key column names;
        key columns of type 'str' must have a fixed size.  With no keyword
        arguments the table is opened with its existing schema.
        """
        # open self if not
        if not self.is_opened():
            self.open()
        # type_fields
        if _type_fields:
            type_fields = OrderedDict()
            # sort type_fields so the column order is deterministic
            _items = sorted(_type_fields.items(), key=lambda n: n[0])
            for column_name, column_type in _items:
                if column_name == 'primary_key':
                    continue
                if column_type == 'bool':
                    column = Column(column_name, column_type, 1)
                elif column_type == 'int':
                    column = Column(column_name, column_type, 8)
                elif column_type == 'float':
                    column = Column(column_name, column_type, 8)
                elif column_type.startswith('str'):
                    # Parse an optional fixed size from 'str[N]'.
                    if '[' in column_type:
                        s = column_type.index('[') + 1
                        e = column_type.index(']')
                        size = int(column_type[s:e])
                    else:
                        size = None
                    column = Column(column_name, column_type, size)
                else:
                    raise Exception('unsupported column type')
                type_fields[column_name] = column
            # add primary_key at the end of dict
            if 'primary_key' not in _type_fields:
                raise Exception('primary_key is missing, but it is required')
            column_names = _type_fields['primary_key']
            for column_name in column_names:
                column = type_fields[column_name]
                # Variable-length strings cannot be part of the key.
                if column.type == 'str' and column.size is None:
                    raise Exception(
                        'Primary key\'s column with type'
                        '"str" must have fixed size'
                    )
            type_fields['primary_key'] = column_names
        else:
            type_fields = None
        # table
        table = Table(self, table_name, type_fields)
        self.tables.append(table)
        return table
| {
"repo_name": "yadb/yadb",
"path": "backup/store/database.py",
"copies": "1",
"size": "3405",
"license": "mit",
"hash": 3536149805163537400,
"line_mean": 27.6134453782,
"line_max": 77,
"alpha_frac": 0.4966226138,
"autogenerated": false,
"ratio": 4.439374185136897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5435996798936896,
"avg_score": null,
"num_lines": null
} |
__all__ = ["DatadogNotifier"]
from freight import http
from freight.models import App, Task, TaskStatus, User
from .base import Notifier, NotifierEvent, generate_event_title
class DatadogNotifier(Notifier):
    """Notifier that posts Freight deploy events to a Datadog webhook."""

    def get_options(self):
        """Declare the single required config option: the webhook URL."""
        return {"webhook_url": {"required": True}}

    def should_send_deploy(self, deploy, task, config, event):
        """Notify on task start, and on finish only when it succeeded."""
        started = event == NotifierEvent.TASK_STARTED
        finished_ok = (
            event == NotifierEvent.TASK_FINISHED
            and task.status == TaskStatus.finished
        )
        return started or finished_ok

    def send_deploy(self, deploy, task, config, event):
        """POST a Datadog event describing this deploy."""
        webhook_url = config["webhook_url"]
        app = App.query.get(deploy.app_id)
        task = Task.query.get(deploy.task_id)
        user = User.query.get(task.user_id)
        title = generate_event_title(app, deploy, task, user, event)
        # e.g. "myapp/production#42"
        deploy_name = "%s/%s#%s" % (app.name, deploy.environment, deploy.number)
        # https://docs.datadoghq.com/api/?lang=bash#post-an-event
        # Title doubles as the event text; tags allow refined searches.
        payload = {
            "title": title,
            "text": title,
            "priority": "normal",
            "alert_type": "info",
            "tags": [
                "freight_deploy_name:" + deploy_name,
                "freight_deploy_status:" + str(event),
                "freight_app:" + app.name,
                "freight_ref:" + task.ref,
                "freight_sha:" + task.sha,
            ],
        }
        http.post(webhook_url, json=payload)
| {
"repo_name": "getsentry/freight",
"path": "freight/notifiers/datadog.py",
"copies": "1",
"size": "1645",
"license": "apache-2.0",
"hash": -5586916047744406000,
"line_mean": 31.2549019608,
"line_max": 108,
"alpha_frac": 0.5519756839,
"autogenerated": false,
"ratio": 3.9448441247002397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49968198086002397,
"avg_score": null,
"num_lines": null
} |
"""All datastore models live in this module"""
import datetime
from google.appengine.ext import ndb
class Torrent(ndb.Model):
    """A main model for representing an individual Torrent entry."""
    title = ndb.StringProperty(indexed=False, required=True)  # human-readable torrent title
    btih = ndb.StringProperty(indexed=False, required=True)  # Infohash
    dt = ndb.DateTimeProperty(required=True)  # Create/update time, as reported by tracker
    nbytes = ndb.IntegerProperty(indexed=False, required=True)  # Torrent data size, bytes
    description = ndb.TextProperty(required=True)  # full description text
    forum_id = ndb.IntegerProperty(required=True)  # for finding torrents in category but not its subcategories
    # ndb caching TTL for this model, in seconds
    _memcache_timeout = 2592000  # 30 days
class Account(ndb.Model):
    """Represents tracker user account along with its session"""
    username = ndb.StringProperty(indexed=False, required=True)
    # NOTE(review): the password is stored as a plain StringProperty, i.e.
    # in clear text -- confirm this is intended for this deployment.
    password = ndb.StringProperty(indexed=False, required=True)
    userid = ndb.IntegerProperty(indexed=False, required=True)  # numeric tracker user id
    cookies = ndb.JsonProperty()  # serialized session cookies; unset (None) until a session exists
    # ndb caching TTL for this model, in seconds
    _memcache_timeout = 86400  # 1 day

    def __repr__(self):
        # ``cookies`` may be None; the short-circuit avoids calling .keys()
        # on None (the placeholder then renders as "None").
        return "<Account username='{}' userid='{}' cookies=[{}]>".format(
            self.username, self.userid, self.cookies and self.cookies.keys())
class Category(ndb.Model):
    """Represents category entry"""
    title = ndb.StringProperty(indexed=False, required=True)  # display name of the category
    # ndb caching TTL for this model, in seconds
    _memcache_timeout = 86400  # 1 day
class PersistentScalarValue(ndb.Expando):
    """Persistent scalar value that is stored in datastore.

    Uses an Expando model, so the stored property name and type are
    presumably chosen by callers at write time -- nothing is declared here.
    """
    pass
| {
"repo_name": "notapresent/rutracker_rss",
"path": "models.py",
"copies": "1",
"size": "1583",
"license": "apache-2.0",
"hash": 2891276585133262000,
"line_mean": 36.6904761905,
"line_max": 114,
"alpha_frac": 0.6917245736,
"autogenerated": false,
"ratio": 4.143979057591623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006683696544169195,
"num_lines": 42
} |
__all__ = ['DataStore']
from bisect import bisect_left
DATASTORE_FLAG_SET = 1
DATASTORE_FLAG_DELETE = 2
class DataStore(object):
    """In-memory, LSM-flavoured document store.

    Documents (dicts) are keyed by a composite primary key assembled from
    the ``pk`` field names.  Writes land in the mutable memtable; once it
    holds ``MEM_TABLE_MAX_ITEMS`` entries it is frozen onto the
    ``memtables`` stack (newest first), and reads fall back through that
    stack.
    """

    def __init__(self, pk):
        self.pk = tuple(pk)  # primary key field names
        # active (mutable) memtable
        self.MEM_TABLE_MAX_ITEMS = 4
        self.memtable = MemTable(datastore=self)
        # frozen (immutable) memtables, newest first
        self.memtables = []

    def push_memtable_if_necessary(self):
        """Freeze the active memtable once it is full and start a new one."""
        if self.memtable.get_n_items() >= self.MEM_TABLE_MAX_ITEMS:
            # append current memtable to memtables and make it immutable
            memtable = self.memtable
            self.memtables.insert(0, memtable)
            # create new memtable which is mutable
            self.memtable = MemTable(datastore=self)

    def get(self, key):
        """Return the document stored under ``key``.

        The active memtable is consulted first, then the frozen memtables
        from newest to oldest.  Raises KeyError when no table has the key.
        """
        try:
            doc = self.memtable.get(key)
        except KeyError:
            for memtable in self.memtables:
                try:
                    doc = memtable.get(key)
                    break
                except KeyError:
                    pass
            else:
                # no frozen memtable had it either
                raise KeyError('Key not found: {}'.format(key))
        return doc

    def set(self, key, doc):
        """Store ``doc`` under ``key`` in the active memtable."""
        self.push_memtable_if_necessary()
        self.memtable.set(key, doc)

    def delete(self, key):
        """Record a deletion for ``key`` in the active memtable."""
        self.push_memtable_if_necessary()
        self.memtable.delete(key)

    def add(self, doc):
        """Store ``doc`` keyed by its own primary-key field values."""
        key = tuple(doc[k] for k in self.pk)
        self.set(key, doc)

    def remove(self, doc):
        """Delete the entry whose key matches ``doc``'s primary-key fields."""
        key = tuple(doc[k] for k in self.pk)
        self.delete(key)

    def filter(self, *terms):
        """Yield documents matching ``terms`` from all memtables."""
        for doc in self.memtable.filter(*terms):
            yield doc

        for memtable in self.memtables:
            for doc in memtable.filter(*terms):
                yield doc

        # BUG FIX: the original ended with ``raise StopIteration``, which
        # under PEP 479 (Python 3.7+) escapes the generator as a
        # RuntimeError.  Falling off the end / ``return`` is the correct
        # way to finish a generator.
        return
class MemTable(object):
    """Sorted in-memory table for DataStore (work in progress).

    Keeps one MemIndex per primary-key column plus one for the full
    composite key tuple; ``values`` holds the documents themselves.
    """

    def __init__(self, datastore):
        self.datastore = datastore
        # self.flags = []
        # self.keys = []
        # one index per individual pk column, plus one for the full pk tuple
        self.indexes = {ppk: MemIndex(self.datastore, ppk) for ppk in self.datastore.pk}
        self.indexes[self.datastore.pk] = MemIndex(self.datastore, self.datastore.pk)
        self.values = []

    def get_n_items(self):
        # number of stored documents; DataStore uses this to decide
        # when to freeze this memtable
        return len(self.values)

    def get(self, key):
        """Return the document under ``key`` via the composite-key index.

        Raises KeyError when the key is absent or flagged as deleted.
        """
        # i = bisect_left(self.keys, key)
        # if i == len(self.keys):
        #     raise KeyError('Key not found: {}'.format(key))
        # # flag
        # flag = self.flags[i]
        # if flag == DATASTORE_FLAG_DELETE:
        #     raise KeyError('Key not found: {}'.format(key))
        i = self.indexes[self.datastore.pk].get(key)
        # check key of document
        doc = self.values[i]
        doc_key = tuple(doc[k] for k in self.datastore.pk)

        if key != doc_key:
            raise KeyError('Key not found: {}'.format(key))

        return doc

    def set(self, key, doc):
        # NOTE(review): unfinished -- MemIndex defines no ``bisect_left``
        # method, so the line below raises AttributeError, and neither the
        # indexes nor ``self.values`` are ever updated with ``doc``.
        # i = bisect_left(self.keys, key)
        # if i == len(self.keys):
        #     self.flags.insert(i, DATASTORE_FLAG_SET)
        #     self.keys.insert(i, key)
        #     self.values.insert(i, doc)
        # else:
        #     # check key
        #     old_key = self.keys[i]
        #     if old_key == key:
        #         self.flags[i] = DATASTORE_FLAG_SET
        #         self.values[i] = doc
        #     else:
        #         self.flags.insert(i, DATASTORE_FLAG_SET)
        #         self.keys.insert(i, key)
        #         self.values.insert(i, doc)
        i = self.indexes[self.datastore.pk].bisect_left(key, -1)

    def delete(self, key):
        # NOTE(review): unfinished -- this class never defines ``self.keys``
        # or ``self.flags`` (see __init__, where they are commented out), so
        # delete() raises AttributeError; it appears to predate the MemIndex
        # refactor.
        i = bisect_left(self.keys, key)

        if len(self.keys) > 0 and self.keys[i] == key:
            self.flags[i] = DATASTORE_FLAG_DELETE
        else:
            # default empty doc
            doc = {}
            self.flags.insert(i, DATASTORE_FLAG_DELETE)
            self.keys.insert(i, key)
            self.values.insert(i, doc)

    def filter(self, *terms):
        # NOTE(review): a bare ``raise StopIteration`` inside a generator is
        # a RuntimeError under PEP 479 (Python 3.7+); a plain ``return``
        # would be the safe equivalent.
        raise StopIteration
class MemIndex(object):
    """Sorted key index with set/delete tombstone flags.

    ``keys`` is kept sorted; ``flags`` records, per position, whether the
    entry is live (DATASTORE_FLAG_SET) or a deletion marker
    (DATASTORE_FLAG_DELETE).
    """

    def __init__(self, datastore, ppk):
        self.datastore = datastore
        self.ppk = ppk  # the pk column (or tuple of columns) this index covers
        self.flags = []
        self.keys = []

    def get(self, key):
        """Return the insertion position of ``key``.

        Raises KeyError if ``key`` sorts past every stored key or the entry
        at its position carries a deletion marker.  Callers must still
        verify the key at the returned position, since bisect_left only
        yields an insertion point.
        """
        i = bisect_left(self.keys, key)

        if i == len(self.keys):
            raise KeyError('Key not found: {}'.format(key))

        # a deletion marker hides the entry
        flag = self.flags[i]

        if flag == DATASTORE_FLAG_DELETE:
            raise KeyError('Key not found: {}'.format(key))

        return i

    def set(self, key, doc):
        """Insert ``key`` (or revive a tombstoned one) and return its position.

        ``doc`` is accepted for interface symmetry only; the index stores
        just keys and flags.
        """
        i = bisect_left(self.keys, key)

        if i == len(self.keys):
            # key sorts after everything stored: append at the end
            self.flags.insert(i, DATASTORE_FLAG_SET)
            self.keys.insert(i, key)
        else:
            # check key
            old_key = self.keys[i]

            if old_key == key:
                # already present: just mark it live again
                self.flags[i] = DATASTORE_FLAG_SET
            else:
                self.flags.insert(i, DATASTORE_FLAG_SET)
                self.keys.insert(i, key)

        return i

    def delete(self, key):
        """Mark ``key`` deleted (inserting a tombstone if absent); return its position."""
        i = bisect_left(self.keys, key)

        # BUG FIX: bisect_left may return len(self.keys) when ``key`` sorts
        # after every stored key; the original guard (len(self.keys) > 0)
        # then indexed one past the end and raised IndexError.  Bounds-check
        # the position itself instead.
        if i < len(self.keys) and self.keys[i] == key:
            self.flags[i] = DATASTORE_FLAG_DELETE
        else:
            # absent: insert a tombstone entry for the key
            self.flags.insert(i, DATASTORE_FLAG_DELETE)
            self.keys.insert(i, key)

        return i
class SSTable(object):
    """On-disk sorted string table -- placeholder, not implemented yet."""

    def __init__(self, datastore):
        self.datastore = datastore

    def get(self, key):
        # TODO: not implemented
        pass

    def set(self, key, doc):
        # TODO: not implemented
        pass

    def delete(self, key):
        # TODO: not implemented
        pass
if __name__ == '__main__':
    # Ad-hoc smoke test: populate a 3x3x3 key space where the composite
    # primary key is (a: int, b: float, c: str).
    ds = DataStore(pk=['a', 'b', 'c'])

    for i in range(0, 3):
        for j in range(0, 3):
            for k in range(0, 3):
                doc = {'a': i, 'b': float(j), 'c': str(k), 'd': '{}-{}-{}'.format(i, j, k)}
                ds.add(doc)

    # (1, 1.1, '1') was never inserted, so this lookup is expected to fail
    key = (1, 1.1, '1')
    # print(ds.get(key))
    try:
        print(ds.get(key))
    except KeyError as e:
        print(e)

    key = (1, 1.0, '1')
    print(ds.get(key))

    # delete a few entries, then verify the last deleted key is gone
    key = (2, 1.0, '2')
    ds.delete(key)
    key = (2, 2.0, '2')
    ds.delete(key)
    key = (1, 1.0, '1')
    ds.delete(key)
    try:
        print(ds.get(key))
    except KeyError as e:
        print(e)

    # re-set a previously deleted key and read it back
    key = (1, 1.0, '1')
    doc = {'a': 1, 'b': 1.0, 'c': '1', 'd': 'SOME NEW VALUE'}
    ds.set(key, doc)
    key = (1, 1.0, '1')
    print(ds.get(key))
    """
    """
    # debug
    # NOTE(review): this dump reads ds.memtable.flags/keys, which MemTable
    # does not define -- it will raise AttributeError if reached.
    print('-' * 24)
    for n in zip(ds.memtable.flags, ds.memtable.keys, ds.memtable.values):
        print(n)
    for memtable in ds.memtables:
        print('-' * 24)
        for n in zip(memtable.flags, memtable.keys, memtable.values):
            print(n)
    """
    """
    docs = ds.filter(('a', '>=', 1), ('a', '<=', 2))
    print(docs)
    for doc in docs:
        print(doc)
| {
"repo_name": "mtasic85/datastore",
"path": "old/datastore2.py",
"copies": "1",
"size": "6763",
"license": "mit",
"hash": -2113651367527779600,
"line_mean": 24.0481481481,
"line_max": 91,
"alpha_frac": 0.5030311992,
"autogenerated": false,
"ratio": 3.4878803506962353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4490911549896235,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DataStore']
import os
import re
class DataStore(object):
    """Disk-backed document store skeleton.

    Documents are dicts keyed by the values of ``primary_key_columns``.
    Writes currently go only to an in-memory MemTable; on construction the
    datastore directory is created (or reopened) and existing
    ``*.sstable`` files are scanned for their numeric revision ids.
    """

    def __init__(self, path, name, primary_key_columns, columns):
        self.path = path
        self.name = name
        self.primary_key_columns = primary_key_columns
        self.columns = columns
        self.memtable = MemTable(self)

        # Create the datastore directory.  BUG FIX: the original
        # ``os.makedirs(self.dir_path)`` raised FileExistsError whenever the
        # datastore already existed, even though the scan below clearly
        # expects to reopen existing directories.
        self.dir_path = os.path.join(self.path, self.name)
        os.makedirs(self.dir_path, exist_ok=True)

        # Collect sorted revision ids of on-disk sstable files.  The
        # attribute keeps its original spelling ("revisons") so external
        # readers keep working; note the ids are strings, so the sort is
        # lexicographic.
        self.revisons = []

        for entry in os.scandir(self.dir_path):
            if not entry.is_file():
                continue

            if not entry.name.endswith('.sstable'):
                continue

            numbers = re.findall(r'\b\d+\b', entry.name)
            if not numbers:
                # robustness: ignore sstable files without a numeric
                # revision instead of raising IndexError
                continue
            self.revisons.append(numbers[0])

        self.revisons.sort()

    def close(self):
        """Close the underlying memtable."""
        self.memtable.close()

    def add(self, doc):
        """Insert ``doc``; its key is derived from the primary-key columns."""
        self.memtable.add(doc)

    def get(self, *key):
        """Return the document stored under the given key values."""
        doc = self.memtable.get(key)
        return doc

    def has(self, *key):
        """Return True when a document exists under the given key values."""
        has = self.memtable.has(key)
        return has

    def remove(self, *key):
        """Delete the document stored under the given key values."""
        self.memtable.remove(key)
class MemTable(object):
    """In-memory write buffer mapping primary-key tuples to documents."""

    MAX_DOCS = 100  # intended flush threshold; not enforced anywhere here

    def __init__(self, data_store):
        self.data_store = data_store
        self.primary_key_columns = data_store.primary_key_columns
        self.docs = {}

    def close(self):
        """Nothing to release for a purely in-memory table."""
        pass

    def add(self, doc):
        """Insert *doc*, keyed by the tuple of its primary-key column values."""
        key = tuple(doc[column] for column in self.primary_key_columns)
        self.docs[key] = doc

    def get(self, key):
        """Return the document under *key*; raises KeyError when absent."""
        return self.docs[key]

    def has(self, key):
        """Return True when a document exists under *key*."""
        return key in self.docs

    def remove(self, key):
        """Delete the document under *key*; raises KeyError when absent."""
        del self.docs[key]
class SSTable(object):
    """On-disk sorted string table for one revision -- placeholder, not implemented."""

    def __init__(self, data_store, revision):
        self.data_store = data_store
        self.revision = revision  # revision id this table belongs to

    @classmethod
    def create_sstable(cls, data_store, revision):
        # TODO: presumably meant to serialize the memtable to disk; not implemented
        pass
class PrimaryKey(object):
    """Per-revision primary-key structure -- placeholder, not implemented."""

    def __init__(self, data_store, revision):
        self.data_store = data_store
        self.revision = revision  # revision id this structure belongs to
class Index(object):
    """Per-revision secondary index -- placeholder, not implemented."""

    def __init__(self, data_store, revision):
        self.data_store = data_store
        self.revision = revision  # revision id this index belongs to
class BloomFilter(object):
    """Per-revision bloom filter -- placeholder, not implemented."""

    def __init__(self, data_store, revision):
        self.data_store = data_store
        self.revision = revision  # revision id this filter belongs to
if __name__ == '__main__':
    # Smoke test for the memtable-backed API.
    # BUG FIX: DataStore.__init__ takes (path, name, primary_key_columns,
    # columns); the original call passed only three arguments (a joined
    # path plus two lists) and raised TypeError.  Passing path and name
    # separately yields the same directory, tmp/store0.
    ds = DataStore('tmp', 'store0', ['id0', 'id1'], ['first_name', 'last_name'])
    ds.add({'id0': 1, 'id1': 2, 'first_name': 'Marko', 'last_name': 'Tasic'})
    ds.add({'id0': 2, 'id1': 3, 'first_name': 'Milica', 'last_name': 'Tasic'})
    ds.add({'id0': 3, 'id1': 4, 'first_name': 'Milos', 'last_name': 'Milosevic'})
    ds.add({'id0': 4, 'id1': 5, 'first_name': 'Milan', 'last_name': 'Zdravkovic'})
    print(ds.get(1, 2))
    print(ds.get(4, 5))
    # membership check before and after removal
    print(ds.has(2, 3))
    ds.remove(2, 3)
    print(ds.has(2, 3))
    ds.close()
| {
"repo_name": "mtasic85/datastore",
"path": "old/datastore.py",
"copies": "1",
"size": "2930",
"license": "mit",
"hash": -2572336892628854300,
"line_mean": 25.3963963964,
"line_max": 94,
"alpha_frac": 0.5641638225,
"autogenerated": false,
"ratio": 3.2884399551066217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4352603777606622,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DataTable', 'DataRow']
import FileIO
__author__ = "Charles R Schmidt <schmidtc@gmail.com>"
class DataTable(FileIO.FileIO):
    """ DataTable provides additional functionality to FileIO for data table file tables
    FileIO Handlers that provide data tables should subclass this instead of FileIO """

    class _By_Col:
        # Proxy returned by the ``by_col`` property: reads columns by field
        # name (item access or call) and writes them back via the parent's
        # cast().
        def __init__(self, parent):
            self.p = parent

        def __repr__(self):
            return "keys: " + self.p.header.__repr__()

        def __getitem__(self, key):
            return self.p._get_col(key)

        def __setitem__(self, key, val):
            self.p.cast(key, val)

        def __call__(self, key):
            return self.p._get_col(key)

    def __init__(self, *args, **kwargs):
        FileIO.FileIO.__init__(self, *args, **kwargs)

    def __repr__(self):
        return 'DataTable: % s' % self.dataPath

    def __len__(self):
        """ __len__ should be implemented by DataTable Subclasses """
        raise NotImplementedError

    @property
    def by_col(self):
        # builds a fresh proxy on each access (cheap: it stores one reference)
        return self._By_Col(self)

    def _get_col(self, key):
        """ returns the column vector
        """
        if not self.header:
            raise AttributeError('Please set the header')
        if key in self.header:
            # delegate to 2D slicing: all rows, single column
            return self[:, self.header.index(key)]
        else:
            raise AttributeError('Field: % s does not exist in header' % key)

    def __getitem__(self, key):
        """ DataTables fully support slicing in 2D,
            To provide slicing, handlers must provide __len__
            Slicing accepts up to two arguments.
            Syntax,
            table[row]
            table[row, col]
            table[row_start:row_stop]
            table[row_start:row_stop:row_step]
            table[:, col]
            table[:, col_start:col_stop]
            etc.

            ALL indices are Zero-Offsets,
            i.e.
            #>>> assert index in range(0, len(table))
        """
        # remember the current file position so reading rows does not
        # disturb the caller's cursor; restored just before returning
        prevPos = self.tell()
        if issubclass(type(key), basestring):
            raise TypeError("index should be int or slice")
        if issubclass(type(key), int) or isinstance(key, slice):
            # single argument: row selector only
            rows = key
            cols = None
        elif len(key) > 2:
            raise TypeError("DataTables support two dimmensional slicing, % d slices provided" % len(key))
        elif len(key) == 2:
            rows, cols = key
        else:
            raise TypeError("Key: % r, is confusing me. I don't know what to do" % key)
        if isinstance(rows, slice):
            row_start, row_stop, row_step = rows.indices(len(self))
            self.seek(row_start)
            data = [self.next() for i in range(row_start, row_stop, row_step)]
        else:
            # single row index, clamped into range via slice.indices
            self.seek(slice(rows).indices(len(self))[1])
            data = [self.next()]
        if cols is not None:
            if isinstance(cols, slice):
                col_start, col_stop, col_step = cols.indices(len(data[0]))
                data = [r[col_start:col_stop:col_step] for r in data]
            else:
                #col_start, col_stop, col_step = cols, cols+1, 1
                data = [r[cols] for r in data]
        self.seek(prevPos)
        return data
def _test():
    """Run this module's doctests in verbose mode."""
    from doctest import testmod
    testmod(verbose=True)


if __name__ == '__main__':
    _test()
| {
"repo_name": "pombreda/pysal",
"path": "pysal/core/Tables.py",
"copies": "5",
"size": "3347",
"license": "bsd-3-clause",
"hash": 3963793400627112000,
"line_mean": 31.8137254902,
"line_max": 107,
"alpha_frac": 0.5383925904,
"autogenerated": false,
"ratio": 3.9192037470725993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6957596337472599,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DataTable']
from . import fileio
from ..common import requires
from warnings import warn
import numpy as np
__author__ = "Charles R Schmidt <schmidtc@gmail.com>"
class DataTable(fileio.FileIO):
    """ DataTable provides additional functionality to FileIO for data table file tables
    FileIO Handlers that provide data tables should subclass this instead of FileIO """

    class _By_Col:
        # Proxy returned by the ``by_col`` property: reads columns by field
        # name (item access or call) and writes them back via the parent's
        # cast().
        def __init__(self, parent):
            self.p = parent

        def __repr__(self):
            return "keys: " + self.p.header.__repr__()

        def __getitem__(self, key):
            return self.p._get_col(key)

        def __setitem__(self, key, val):
            self.p.cast(key, val)

        def __call__(self, key):
            return self.p._get_col(key)

    def __init__(self, *args, **kwargs):
        fileio.FileIO.__init__(self, *args, **kwargs)

    def __repr__(self):
        return 'DataTable: % s' % self.dataPath

    def __len__(self):
        """ __len__ should be implemented by DataTable Subclasses """
        raise NotImplementedError

    @property
    def by_col(self):
        # builds a fresh proxy on each access (cheap: it stores one reference)
        return self._By_Col(self)

    def _get_col(self, key):
        """ returns the column vector
        """
        if not self.header:
            raise AttributeError('Please set the header')
        if key in self.header:
            # delegate to 2D slicing: all rows, single column
            return self[:, self.header.index(key)]
        else:
            raise AttributeError('Field: % s does not exist in header' % key)

    def by_col_array(self, *args):
        """
        Return columns of table as a numpy array.

        Parameters
        ----------
        *args: any number of strings of length k
               names of variables to extract

        Returns
        -------
        implicit: numpy array of shape (n,k)

        Notes
        -----
        If the variables are not all of the same data type, then numpy rules
        for casting will result in a uniform type applied to all variables.

        If only strings are passed to the function, then an array with those
        columns will be constructed.

        If only one list of strings is passed, the output is identical to those
        strings being passed.

        If at least one list is passed and other strings or lists are passed,
        this returns a tuple containing arrays constructed from each positional
        argument.

        Examples
        --------
        >>> import pysal.lib
        >>> dbf = pysal.lib.io.open(pysal.lib.examples.get_path('NAT.dbf'))
        >>> hr = dbf.by_col_array('HR70', 'HR80')
        >>> hr[0:5]
        array([[ 0.        ,  8.85582713],
               [ 0.        , 17.20874204],
               [ 1.91515848,  3.4507747 ],
               [ 1.28864319,  3.26381409],
               [ 0.        ,  7.77000777]])
        >>> hr = dbf.by_col_array(['HR80', 'HR70'])
        >>> hr[0:5]
        array([[ 8.85582713,  0.        ],
               [17.20874204,  0.        ],
               [ 3.4507747 ,  1.91515848],
               [ 3.26381409,  1.28864319],
               [ 7.77000777,  0.        ]])
        >>> hr = dbf.by_col_array(['HR80'])
        >>> hr[0:5]
        array([[ 8.85582713],
               [17.20874204],
               [ 3.4507747 ],
               [ 3.26381409],
               [ 7.77000777]])

        Numpy only supports homogeneous arrays. See Notes above.

        >>> hr = dbf.by_col_array('STATE_NAME', 'HR80')
        >>> hr[0:5]
        array([['Minnesota', '8.8558271343'],
               ['Washington', '17.208742041'],
               ['Washington', '3.4507746989'],
               ['Washington', '3.2638140931'],
               ['Washington', '7.77000777']], dtype='<U20')
        >>> y, X = dbf.by_col_array('STATE_NAME', ['HR80', 'HR70'])
        >>> y[0:5]
        array([['Minnesota'],
               ['Washington'],
               ['Washington'],
               ['Washington'],
               ['Washington']], dtype='<U20')
        >>> X[0:5]
        array([[ 8.85582713,  0.        ],
               [17.20874204,  0.        ],
               [ 3.4507747 ,  1.91515848],
               [ 3.26381409,  1.28864319],
               [ 7.77000777,  0.        ]])
        """
        if any([isinstance(arg, list) for arg in args]):
            # mixed strings/lists: build one array per positional argument
            results = []
            for namelist in args:
                if isinstance(namelist, str):
                    results.append([self._get_col(namelist)])
                else:
                    results.append([self._get_col(vbl) for vbl in namelist])
            if len(results) == 1:
                return np.array(results[0]).T
            else:
                return tuple(np.array(lst).T for lst in results)
        else:
            # only strings: a single (n, k) array
            return np.array([self._get_col(name) for name in args]).T

    def __getitem__(self, key):
        """ DataTables fully support slicing in 2D,
            To provide slicing, handlers must provide __len__
            Slicing accepts up to two arguments.
            Syntax,
            table[row]
            table[row, col]
            table[row_start:row_stop]
            table[row_start:row_stop:row_step]
            table[:, col]
            table[:, col_start:col_stop]
            etc.

            ALL indices are Zero-Offsets,
            i.e.
            #>>> assert index in range(0, len(table))
        """
        # remember the current file position so reading rows does not
        # disturb the caller's cursor; restored just before returning
        prevPos = self.tell()
        if issubclass(type(key), str):
            raise TypeError("index should be int or slice")
        if issubclass(type(key), int) or isinstance(key, slice):
            # single argument: row selector only
            rows = key
            cols = None
        elif len(key) > 2:
            raise TypeError("DataTables support two dimmensional slicing, % d slices provided" % len(key))
        elif len(key) == 2:
            rows, cols = key
        else:
            raise TypeError("Key: % r, is confusing me. I don't know what to do" % key)
        if isinstance(rows, slice):
            row_start, row_stop, row_step = rows.indices(len(self))
            self.seek(row_start)
            data = [next(self) for i in range(row_start, row_stop, row_step)]
        else:
            # single row index, clamped into range via slice.indices
            self.seek(slice(rows).indices(len(self))[1])
            data = [next(self)]
        if cols is not None:
            if isinstance(cols, slice):
                col_start, col_stop, col_step = cols.indices(len(data[0]))
                data = [r[col_start:col_stop:col_step] for r in data]
            else:
                #col_start, col_stop, col_step = cols, cols+1, 1
                data = [r[cols] for r in data]
        self.seek(prevPos)
        return data

    @requires('pandas')
    def to_df(self, n=-1, read_shp=None, **df_kws):
        # Read up to ``n`` records (-1 = all) into a pandas DataFrame.
        # When the table is a .dbf (or read_shp is True), also attempt to
        # attach the companion shapefile's geometries as a 'geometry' column.
        import pandas as pd
        self.seek(0)
        header = self.header
        records = self.read(n)
        df = pd.DataFrame(records, columns=header, **df_kws)
        if read_shp is not False:
            if read_shp is True or self.dataPath.endswith('.dbf'):
                read_shp = self.dataPath[:-3] + 'shp'
                try:
                    from .geotable.shp import shp2series
                    df['geometry'] = shp2series(self.dataPath[:-3] + 'shp')
                except IOError as e:
                    # best effort: a missing/unreadable shapefile does not
                    # abort the tabular read
                    warn('Encountered the following error in attempting to read'
                         ' the shapefile {}. Proceeding with read, but the error'
                         ' will be reproduced below:\n'
                         ' {}'.format(self.dataPath[:-3]+'shp', e))
        return df
def _test():
    """Run this module's doctests in verbose mode."""
    from doctest import testmod
    testmod(verbose=True)


if __name__ == '__main__':
    _test()
| {
"repo_name": "lixun910/pysal",
"path": "pysal/lib/io/tables.py",
"copies": "1",
"size": "7601",
"license": "bsd-3-clause",
"hash": 3851538012369659000,
"line_mean": 33.55,
"line_max": 107,
"alpha_frac": 0.5040126299,
"autogenerated": false,
"ratio": 3.8681933842239187,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4872206014123919,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DataTable']
import FileIO
from ..common import requires
from warnings import warn
import numpy as np
__author__ = "Charles R Schmidt <schmidtc@gmail.com>"
class DataTable(FileIO.FileIO):
    """ DataTable provides additional functionality to FileIO for data table file tables
    FileIO Handlers that provide data tables should subclass this instead of FileIO """

    class _By_Col:
        # Proxy returned by the ``by_col`` property: reads columns by field
        # name (item access or call) and writes them back via the parent's
        # cast().
        def __init__(self, parent):
            self.p = parent

        def __repr__(self):
            return "keys: " + self.p.header.__repr__()

        def __getitem__(self, key):
            return self.p._get_col(key)

        def __setitem__(self, key, val):
            self.p.cast(key, val)

        def __call__(self, key):
            return self.p._get_col(key)

    def __init__(self, *args, **kwargs):
        FileIO.FileIO.__init__(self, *args, **kwargs)

    def __repr__(self):
        return 'DataTable: % s' % self.dataPath

    def __len__(self):
        """ __len__ should be implemented by DataTable Subclasses """
        raise NotImplementedError

    @property
    def by_col(self):
        # builds a fresh proxy on each access (cheap: it stores one reference)
        return self._By_Col(self)

    def _get_col(self, key):
        """ returns the column vector
        """
        if not self.header:
            raise AttributeError('Please set the header')
        if key in self.header:
            # delegate to 2D slicing: all rows, single column
            return self[:, self.header.index(key)]
        else:
            raise AttributeError('Field: % s does not exist in header' % key)

    def by_col_array(self, *args):
        """
        Return columns of table as a numpy array.

        Parameters
        ----------
        *args: any number of strings of length k
               names of variables to extract

        Returns
        -------
        implicit: numpy array of shape (n,k)

        Notes
        -----
        If the variables are not all of the same data type, then numpy rules
        for casting will result in a uniform type applied to all variables.

        If only strings are passed to the function, then an array with those
        columns will be constructed.

        If only one list of strings is passed, the output is identical to those
        strings being passed.

        If at least one list is passed and other strings or lists are passed,
        this returns a tuple containing arrays constructed from each positional
        argument.

        Examples
        --------
        >>> import pysal as ps
        >>> dbf = ps.open(ps.examples.get_path('NAT.dbf'))
        >>> hr = dbf.by_col_array('HR70', 'HR80')
        >>> hr[0:5]
        array([[  0.        ,   8.85582713],
               [  0.        ,  17.20874204],
               [  1.91515848,   3.4507747 ],
               [  1.28864319,   3.26381409],
               [  0.        ,   7.77000777]])
        >>> hr = dbf.by_col_array(['HR80', 'HR70'])
        >>> hr[0:5]
        array([[  8.85582713,   0.        ],
               [ 17.20874204,   0.        ],
               [  3.4507747 ,   1.91515848],
               [  3.26381409,   1.28864319],
               [  7.77000777,   0.        ]])
        >>> hr = dbf.by_col_array(['HR80'])
        >>> hr[0:5]
        array([[  8.85582713],
               [ 17.20874204],
               [  3.4507747 ],
               [  3.26381409],
               [  7.77000777]])

        Numpy only supports homogeneous arrays. See Notes above.

        >>> hr = dbf.by_col_array('STATE_NAME', 'HR80')
        >>> hr[0:5]
        array([['Minnesota', '8.8558271343'],
               ['Washington', '17.208742041'],
               ['Washington', '3.4507746989'],
               ['Washington', '3.2638140931'],
               ['Washington', '7.77000777']],
              dtype='|S20')
        >>> y, X = dbf.by_col_array('STATE_NAME', ['HR80', 'HR70'])
        >>> y[0:5]
        array([['Minnesota'],
               ['Washington'],
               ['Washington'],
               ['Washington'],
               ['Washington']],
              dtype='|S20')
        >>> X[0:5]
        array([[  8.85582713,   0.        ],
               [ 17.20874204,   0.        ],
               [  3.4507747 ,   1.91515848],
               [  3.26381409,   1.28864319],
               [  7.77000777,   0.        ]])
        """
        if any([isinstance(arg, list) for arg in args]):
            # mixed strings/lists: build one array per positional argument
            results = []
            for namelist in args:
                if isinstance(namelist, str):
                    results.append([self._get_col(namelist)])
                else:
                    results.append([self._get_col(vbl) for vbl in namelist])
            if len(results) == 1:
                return np.array(results[0]).T
            else:
                return tuple(np.array(lst).T for lst in results)
        else:
            # only strings: a single (n, k) array
            return np.array([self._get_col(name) for name in args]).T

    def __getitem__(self, key):
        """ DataTables fully support slicing in 2D,
            To provide slicing, handlers must provide __len__
            Slicing accepts up to two arguments.
            Syntax,
            table[row]
            table[row, col]
            table[row_start:row_stop]
            table[row_start:row_stop:row_step]
            table[:, col]
            table[:, col_start:col_stop]
            etc.

            ALL indices are Zero-Offsets,
            i.e.
            #>>> assert index in range(0, len(table))
        """
        # remember the current file position so reading rows does not
        # disturb the caller's cursor; restored just before returning
        prevPos = self.tell()
        if issubclass(type(key), basestring):
            raise TypeError("index should be int or slice")
        if issubclass(type(key), int) or isinstance(key, slice):
            # single argument: row selector only
            rows = key
            cols = None
        elif len(key) > 2:
            raise TypeError("DataTables support two dimmensional slicing, % d slices provided" % len(key))
        elif len(key) == 2:
            rows, cols = key
        else:
            raise TypeError("Key: % r, is confusing me. I don't know what to do" % key)
        if isinstance(rows, slice):
            row_start, row_stop, row_step = rows.indices(len(self))
            self.seek(row_start)
            data = [self.next() for i in range(row_start, row_stop, row_step)]
        else:
            # single row index, clamped into range via slice.indices
            self.seek(slice(rows).indices(len(self))[1])
            data = [self.next()]
        if cols is not None:
            if isinstance(cols, slice):
                col_start, col_stop, col_step = cols.indices(len(data[0]))
                data = [r[col_start:col_stop:col_step] for r in data]
            else:
                #col_start, col_stop, col_step = cols, cols+1, 1
                data = [r[cols] for r in data]
        self.seek(prevPos)
        return data

    @requires('pandas')
    def to_df(self, n=-1, read_shp=None, **df_kws):
        # Read up to ``n`` records (-1 = all) into a pandas DataFrame.
        # When the table is a .dbf (or read_shp is True), also attempt to
        # attach the companion shapefile's geometries as a 'geometry' column.
        import pandas as pd
        self.seek(0)
        header = self.header
        records = self.read(n)
        df = pd.DataFrame(records, columns=header, **df_kws)
        if read_shp is not False:
            if read_shp is True or self.dataPath.endswith('.dbf'):
                read_shp = self.dataPath[:-3] + 'shp'
                try:
                    import pysal.contrib.pdio.shp as shp
                    df['geometry'] = shp.shp2series(self.dataPath[:-3] + 'shp')
                except IOError as e:
                    # best effort: a missing/unreadable shapefile does not
                    # abort the tabular read
                    warn('Encountered the following error in attempting to read'
                         ' the shapefile {}. Proceeding with read, but the error'
                         ' will be reproduced below:\n'
                         ' {}'.format(self.dataPath[:-3]+'shp', e))
        return df
def _test():
    """Run this module's doctests in verbose mode."""
    from doctest import testmod
    testmod(verbose=True)


if __name__ == '__main__':
    _test()
| {
"repo_name": "ljwolf/pysal_core",
"path": "libpysal/io/Tables.py",
"copies": "8",
"size": "7658",
"license": "bsd-3-clause",
"hash": 2594138759264239600,
"line_mean": 33.3408071749,
"line_max": 107,
"alpha_frac": 0.4992165056,
"autogenerated": false,
"ratio": 3.8657243816254416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0031578310414727235,
"num_lines": 223
} |
__all__ = ['DataTable']
import FileIO
import numpy as np
__author__ = "Charles R Schmidt <schmidtc@gmail.com>"
class DataTable(FileIO.FileIO):
    """ DataTable provides additional functionality to FileIO for data table file tables
    FileIO Handlers that provide data tables should subclass this instead of FileIO """

    class _By_Col:
        # Proxy returned by the ``by_col`` property: reads columns by field
        # name (item access or call) and writes them back via the parent's
        # cast().
        def __init__(self, parent):
            self.p = parent

        def __repr__(self):
            return "keys: " + self.p.header.__repr__()

        def __getitem__(self, key):
            return self.p._get_col(key)

        def __setitem__(self, key, val):
            self.p.cast(key, val)

        def __call__(self, key):
            return self.p._get_col(key)

    def __init__(self, *args, **kwargs):
        FileIO.FileIO.__init__(self, *args, **kwargs)

    def __repr__(self):
        return 'DataTable: % s' % self.dataPath

    def __len__(self):
        """ __len__ should be implemented by DataTable Subclasses """
        raise NotImplementedError

    @property
    def by_col(self):
        # builds a fresh proxy on each access (cheap: it stores one reference)
        return self._By_Col(self)

    def _get_col(self, key):
        """ returns the column vector
        """
        if not self.header:
            raise AttributeError('Please set the header')
        if key in self.header:
            # delegate to 2D slicing: all rows, single column
            return self[:, self.header.index(key)]
        else:
            raise AttributeError('Field: % s does not exist in header' % key)

    def by_col_array(self, variable_names):
        """
        Return columns of table as a numpy array

        Parameters
        ----------
        variable_names: list of strings of length k
                        names of variables to extract

        Returns
        -------
        implicit: numpy array of shape (n,k)

        Notes
        -----
        If the variables are not all of the same data type, then numpy rules
        for casting will result in a uniform type applied to all variables.

        Examples
        --------
        >>> import pysal as ps
        >>> dbf = ps.open(ps.examples.get_path('NAT.dbf'))
        >>> hr = dbf.by_col_array(['HR70', 'HR80'])
        >>> hr[0:5]
        array([[  0.        ,   8.85582713],
               [  0.        ,  17.20874204],
               [  1.91515848,   3.4507747 ],
               [  1.28864319,   3.26381409],
               [  0.        ,   7.77000777]])
        >>> hr = dbf.by_col_array(['HR80', 'HR70'])
        >>> hr[0:5]
        array([[  8.85582713,   0.        ],
               [ 17.20874204,   0.        ],
               [  3.4507747 ,   1.91515848],
               [  3.26381409,   1.28864319],
               [  7.77000777,   0.        ]])
        >>> hr = dbf.by_col_array(['HR80'])
        >>> hr[0:5]
        array([[  8.85582713],
               [ 17.20874204],
               [  3.4507747 ],
               [  3.26381409],
               [  7.77000777]])

        Numpy only supports homogeneous arrays. See Notes above.

        >>> hr = dbf.by_col_array(['STATE_NAME', 'HR80'])
        >>> hr[0:5]
        array([['Minnesota', '8.8558271343'],
               ['Washington', '17.208742041'],
               ['Washington', '3.4507746989'],
               ['Washington', '3.2638140931'],
               ['Washington', '7.77000777']],
              dtype='|S20')
        """
        lst = [self._get_col(variable) for variable in variable_names]
        return np.array(lst).T

    def __getitem__(self, key):
        """ DataTables fully support slicing in 2D,
            To provide slicing, handlers must provide __len__
            Slicing accepts up to two arguments.
            Syntax,
            table[row]
            table[row, col]
            table[row_start:row_stop]
            table[row_start:row_stop:row_step]
            table[:, col]
            table[:, col_start:col_stop]
            etc.

            ALL indices are Zero-Offsets,
            i.e.
            #>>> assert index in range(0, len(table))
        """
        # remember the current file position so reading rows does not
        # disturb the caller's cursor; restored just before returning
        prevPos = self.tell()
        if issubclass(type(key), basestring):
            raise TypeError("index should be int or slice")
        if issubclass(type(key), int) or isinstance(key, slice):
            # single argument: row selector only
            rows = key
            cols = None
        elif len(key) > 2:
            raise TypeError("DataTables support two dimmensional slicing, % d slices provided" % len(key))
        elif len(key) == 2:
            rows, cols = key
        else:
            raise TypeError("Key: % r, is confusing me. I don't know what to do" % key)
        if isinstance(rows, slice):
            row_start, row_stop, row_step = rows.indices(len(self))
            self.seek(row_start)
            data = [self.next() for i in range(row_start, row_stop, row_step)]
        else:
            # single row index, clamped into range via slice.indices
            self.seek(slice(rows).indices(len(self))[1])
            data = [self.next()]
        if cols is not None:
            if isinstance(cols, slice):
                col_start, col_stop, col_step = cols.indices(len(data[0]))
                data = [r[col_start:col_stop:col_step] for r in data]
            else:
                #col_start, col_stop, col_step = cols, cols+1, 1
                data = [r[cols] for r in data]
        self.seek(prevPos)
        return data
def _test():
    """Run this module's doctests in verbose mode."""
    from doctest import testmod
    testmod(verbose=True)


if __name__ == '__main__':
    _test()
| {
"repo_name": "darribas/pysal",
"path": "pysal/core/Tables.py",
"copies": "8",
"size": "5347",
"license": "bsd-3-clause",
"hash": -8968935362149206000,
"line_mean": 31.0179640719,
"line_max": 107,
"alpha_frac": 0.4999064896,
"autogenerated": false,
"ratio": 3.7814710042432815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8281377493843282,
"avg_score": null,
"num_lines": null
} |
__all__ = ['date_range', 'get_past_date', 'parse_date']
import datetime
def date_range(since=None, until=None):
    """Get date range from `since` until `until`, yielded latest-first.

    Both bounds default to today's date and are inclusive.

    :type since: datetime.date or None
    :param since: Earliest date of the range. Defaults to today.
    :type until: datetime.date or None
    :param until: Latest date of the range. Defaults to today.
    :rtype: iterable
    :returns: iterable of datetime.date instances for each date within the range.
    """
    # BUG FIX: the original defaults were ``datetime.date.today()`` in the
    # signature, which Python evaluates once at import time -- a
    # long-running process would silently keep using a stale "today".
    # Evaluate the defaults at call time instead.
    if since is None:
        since = datetime.date.today()
    if until is None:
        until = datetime.date.today()
    while since <= until:
        yield until
        until -= datetime.timedelta(days=1)
def get_past_date(days=0, weeks=0):
    """Return the date *days* days and *weeks* weeks before today.

    Negative values move forward in time instead; with no arguments this is
    simply today's date.

    :type days: int
    :param days: Number of days ago if positive, later if negative.
    :type weeks: int
    :param weeks: Number of weeks ago if positive, later if negative.
    """
    offset = datetime.timedelta(days=days, weeks=weeks)
    today = datetime.date.today()
    return today - offset
def parse_date(date_string, format='%Y-%m-%d'):
    """Parse *date_string* according to *format* and return a datetime.date."""
    parsed = datetime.datetime.strptime(date_string, format)
    return parsed.date()
| {
"repo_name": "hans-t/autos",
"path": "autos/utils/date.py",
"copies": "1",
"size": "1051",
"license": "mit",
"hash": -2142248918233033700,
"line_mean": 26.6578947368,
"line_max": 81,
"alpha_frac": 0.6660323501,
"autogenerated": false,
"ratio": 3.687719298245614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9852147027036242,
"avg_score": 0.0003209242618741977,
"num_lines": 38
} |
__all__ = ['DateTimeProtocolCommand']
import time
import random
import datetime
from print_colors import PrintColors
from contact import Contact
from protocol_command import ProtocolCommand
class DateTimeProtocolCommand(ProtocolCommand):
    """Protocol command that periodically asks a random peer for its UTC time."""

    def start(self):
        # Kick off the first request; req() reschedules itself afterwards.
        self.req()

    def stop(self):
        raise NotImplementedError

    def req(self):
        # Pick any known peer other than ourselves.
        peer = self.node.rt.contacts.random(without_id=self.node.id)
        if not peer:
            # No peer known yet; retry after a randomized delay.
            self.node.loop.call_later(0.0 + random.random() * 10.0, self.req)
            return
        # Request payload: empty positional and keyword arguments.
        payload = ((), {})
        message_data = self.node.build_message(
            self.protocol_major_version,
            self.protocol_minor_version,
            self.PROTOCOL_REQ,
            self.protocol_command_code,
            payload,
        )
        # force del
        del payload
        self.node.send_message(message_data, peer.remote_host, peer.remote_port)
        # Schedule the next request with a randomized delay.
        self.node.loop.call_later(0.0 + random.random() * 10.0, self.req)

    def on_req(self, remote_host, remote_port, *args, **kwargs):
        # Incoming request: answer immediately.
        self.res(remote_host, remote_port, *args, **kwargs)

    def res(self, remote_host, remote_port, *args, **kwargs):
        # Respond with the current UTC time in ISO-8601 form.
        payload = {
            'datetime': datetime.datetime.utcnow().isoformat(),
        }
        message_data = self.node.build_message(
            self.protocol_major_version,
            self.protocol_minor_version,
            self.PROTOCOL_RES,
            self.protocol_command_code,
            payload,
        )
        # force del
        del payload
        self.node.send_message(message_data, remote_host, remote_port)

    def on_res(self, remote_host, remote_port, res):
        dt = res['datetime']
        print('datetime {}:{} {}'.format(remote_host, remote_port, dt))
| {
"repo_name": "mtasic85/routingtable",
"path": "datetime_protocol_command.py",
"copies": "1",
"size": "2043",
"license": "mit",
"hash": 3825377836880140300,
"line_mean": 25.5324675325,
"line_max": 77,
"alpha_frac": 0.5756240822,
"autogenerated": false,
"ratio": 4.029585798816568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0028679653679653683,
"num_lines": 77
} |
__all__ = ['daub','qmf','cascade','morlet']
import numpy as np
from numpy.dual import eig
from scipy.misc import comb
from scipy import linspace, pi, exp
def daub(p):
    """The coefficients for the FIR low-pass filter producing Daubechies wavelets.

    p>=1 gives the order of the zero at f=1/2. There are 2p filter coefficients.

    Raises ValueError if p < 1, or if p >= 35 (the polynomial root-finding
    used here is numerically unreliable for large p).
    """
    sqrt = np.sqrt
    if p < 1:
        # Explicit exception instead of `assert`, which is stripped under -O.
        raise ValueError("p must be at least 1")
    if p == 1:
        c = 1 / sqrt(2)
        return np.array([c, c])
    elif p == 2:
        f = sqrt(2) / 8
        c = sqrt(3)
        return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
    elif p == 3:
        # Closed-form complex root for the p == 3 case.
        tmp = 12 * sqrt(10)
        z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
        z1c = np.conj(z1)
        f = sqrt(2) / 8
        d0 = np.real((1 - z1) * (1 - z1c))
        a0 = np.real(z1 * z1c)
        a1 = 2 * np.real(z1)
        return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
                                  a0 - 3 * a1 + 3, 3 - a1, 1])
    elif p < 35:
        # General case: construct the binomial polynomial and factor it.
        # (A nested `if p < 35` test here was always true and its alternate
        # branch unreachable; the dead code has been removed.)
        P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
        yj = np.roots(P)
        # For each root, compute the two z roots, keep the one with |z| > 1,
        # and build up the final polynomial.
        c = np.poly1d([1, 1]) ** p
        q = np.poly1d([1])
        for k in range(p - 1):
            yval = yj[k]
            part = 2 * sqrt(yval * (yval - 1))
            const = 1 - 2 * yval
            z1 = const + part
            if (abs(z1)) < 1:
                z1 = const - part
            q = q * [1, -z1]
        q = c * np.real(q)
        # Normalize so the coefficients sum to sqrt(2).
        q = q / np.sum(q) * sqrt(2)
        return q.c[::-1]
    else:
        # Parenthesized raise: valid on both Python 2 and 3 (the old
        # `raise ValueError, "..."` form is a syntax error on Python 3).
        raise ValueError("Polynomial factorization does not work "
                         "well for p too large.")
def qmf(hk):
    """Return the high-pass quadrature-mirror filter for low-pass filter `hk`.

    The result is `hk` reversed, with every other coefficient negated
    (signs +, -, +, - ...).
    """
    signs = np.array([(-1) ** k for k in range(len(hk))])
    return hk[::-1] * signs
def wavedec(amn,hk):
    # Wavelet decomposition stub: not implemented yet.  `gk` would be the
    # matching high-pass (QMF) filter derived from the low-pass filter `hk`.
    gk = qmf(hk)
    return NotImplemented
def cascade(hk, J=7):
    r"""(x,phi,psi) at dyadic points K/2**J from filter coefficients.

    Inputs:
      hk -- coefficients of low-pass filter
      J  -- values will be computed at grid points $K/2^J$

    Outputs:
      x   -- the dyadic points $K/2^J$ for $K=0...N*(2^J)-1$
             where len(hk)=len(gk)=N+1
      phi -- the scaling function phi(x) at x
             $\phi(x) = \sum_{k=0}^{N} h_k \phi(2x-k)$
      psi -- the wavelet function psi(x) at x
             $\psi(x) = \sum_{k=0}^N g_k \phi(2x-k)$
             Only returned if gk is not None

    Raises ValueError when J is out of range for the given filter length.

    Algorithm:
      Uses the vector cascade algorithm described by Strang and Nguyen in
      "Wavelets and Filter Banks".
      Builds a dictionary of values and slices for quick reuse.
      Then inserts vectors into the final vector at the end.
    """
    N = len(hk)-1
    if (J > 30 - np.log2(N+1)):
        # N*2**J grid points are generated below; cap J to keep sizes sane.
        # Parenthesized raise: valid on both Python 2 and 3 (the old
        # `raise ValueError, "..."` form is a syntax error on Python 3).
        raise ValueError("Too many levels.")
    if (J < 1):
        raise ValueError("Too few levels.")

    # construct matrices needed
    nn,kk = np.ogrid[:N,:N]
    s2 = np.sqrt(2)
    # append a zero so that take works
    thk = np.r_[hk,0]
    gk = qmf(hk)
    tgk = np.r_[gk,0]

    indx1 = np.clip(2*nn-kk,-1,N+1)
    indx2 = np.clip(2*nn-kk+1,-1,N+1)
    m = np.zeros((2,2,N,N),'d')
    m[0,0] = np.take(thk,indx1,0)
    m[0,1] = np.take(thk,indx2,0)
    m[1,0] = np.take(tgk,indx1,0)
    m[1,1] = np.take(tgk,indx2,0)
    m *= s2

    # construct the grid of points
    # (np.float64 spelled explicitly: the bare `np.float` alias is deprecated
    # and removed in modern NumPy, but means exactly the same dtype)
    x = np.arange(0,N*(1<<J),dtype=np.float64) / (1<<J)
    phi = 0*x
    psi = 0*x

    # find phi0, and phi1
    lam, v = eig(m[0,0])
    ind = np.argmin(np.absolute(lam-1))
    # a dictionary with a binary representation of the
    # evaluation points x < 1 -- i.e. position is 0.xxxx
    v = np.real(v[:,ind])
    # need scaling function to integrate to 1 so find
    # eigenvector normalized to sum(v,axis=0)=1
    sm = np.sum(v)
    if sm < 0:  # need scaling function to integrate to 1
        v = -v
        sm = -sm
    bitdic = {}
    bitdic['0'] = v / sm
    bitdic['1'] = np.dot(m[0,1],bitdic['0'])
    step = 1<<J
    phi[::step] = bitdic['0']
    phi[(1<<(J-1))::step] = bitdic['1']
    psi[::step] = np.dot(m[1,0],bitdic['0'])
    psi[(1<<(J-1))::step] = np.dot(m[1,1],bitdic['0'])

    # descend down the levels inserting more and more values
    # into bitdic -- store the values in the correct location once we
    # have computed them -- stored in the dictionary
    # for quicker use later.
    prevkeys = ['1']
    for level in range(2,J+1):
        newkeys = ['%d%s' % (xx,yy) for xx in [0,1] for yy in prevkeys]
        fac = 1<<(J-level)
        for key in newkeys:
            # convert key to number
            num = 0
            for pos in range(level):
                if key[pos] == '1':
                    num += (1<<(level-1-pos))
            pastphi = bitdic[key[1:]]
            ii = int(key[0])
            temp = np.dot(m[0,ii],pastphi)
            bitdic[key] = temp
            phi[num*fac::step] = temp
            psi[num*fac::step] = np.dot(m[1,ii],pastphi)
        prevkeys = newkeys

    return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
    """Complex Morlet wavelet.

    Parameters
    ----------
    M : int
        Length of the wavelet.
    w : float
        Omega0 (center frequency parameter).
    s : float
        Scaling factor; the window spans -s*2*pi to +s*2*pi.
    complete : bool
        Whether to use the complete or the standard version.

    Notes
    -----
    The standard version::

        pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))

    is the commonly used Morlet wavelet, but can have admissibility
    problems at low values of w.  The complete version adds a correction
    term::

        pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))

    which is negligible for w greater than 5.

    The energy of the returned wavelet is not normalised according to s.
    The fundamental frequency of this wavelet in Hz is f = 2*s*w*r / M
    where r is the sampling rate.
    """
    x = linspace(-s * 2 * pi, s * 2 * pi, M)
    wave = exp(1j * w * x)
    if complete:
        # Admissibility correction term.
        wave = wave - exp(-0.5 * (w ** 2))
    return wave * exp(-0.5 * (x ** 2)) * pi ** (-0.25)
| {
"repo_name": "stefanv/scipy3",
"path": "scipy/signal/wavelets.py",
"copies": "2",
"size": "6488",
"license": "bsd-3-clause",
"hash": 5878597628813059000,
"line_mean": 28.8986175115,
"line_max": 82,
"alpha_frac": 0.5339087546,
"autogenerated": false,
"ratio": 2.9370755998189226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4470984354418922,
"avg_score": null,
"num_lines": null
} |
__all__ = ["DayBoxOffice"]
import tushare as ts
from flask import request,json
from flask_restful import Resource
def get_day_boxoffice(day=None):
    """Fetch single-day box office data as a JSON string.

    :param day: date string (YYYY-M-D); defaults to the previous day.
    :returns: JSON payload from tushare, or a JSON error object on failure.
    """
    try:
        # `is None` (identity), not `== None`; also guard BOTH branches so a
        # tushare failure on the default day keeps the JSON error contract
        # (matching get_day_cinema) instead of leaking a raw traceback.
        if day is None:
            result = ts.day_boxoffice().to_json()
        else:
            result = ts.day_boxoffice(day).to_json()
    except Exception:
        result = json.dumps({"error":True,"message":"can not get the data, format date as YYYY-M-D"})
    return result
class DayBoxOffice(Resource):
    """Single-day movie box office figures.

    Returns the previous day's box office by default; pass a date
    parameter to fetch a specific day.

    Query parameters:
        date: date (YYYY-MM-DD); defaults to the previous day.

    Response fields:
        AvgPrice      average ticket price
        AvpPeoPle     average attendance per screening
        BoxOffice     single-day box office (10k CNY)
        BoxOffice_Up  day-over-day change (%)
        IRank         rank
        MovieDay      days since release
        MovieName     film title
        SumBoxOffice  cumulative box office (10k CNY)
        WomIndex      word-of-mouth index
    """

    def get(self):
        # `date` may be absent (None), in which case the previous day is used.
        return get_day_boxoffice(request.args.get("date"))
| {
"repo_name": "FinaceInfo/Chinese-box-office-info",
"path": "api/boxoffice/day_boxoffice.py",
"copies": "1",
"size": "1062",
"license": "mit",
"hash": 2432581363941076500,
"line_mean": 22.2972972973,
"line_max": 105,
"alpha_frac": 0.6252900232,
"autogenerated": false,
"ratio": 2.1933842239185752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33186742471185754,
"avg_score": null,
"num_lines": null
} |
__all__ = ["DayCinema"]
import tushare as ts
from flask import request,json
from flask_restful import Resource
def get_day_cinema(day=None):
    """Fetch per-cinema single-day box office rankings as a JSON string.

    :param day: date string (YYYY-M-D); defaults to the previous day.
    :returns: JSON payload from tushare, or a JSON error object on failure.
    """
    if day is None:  # `is None` (identity), not `== None`
        try:
            result = ts.day_cinema().to_json()
        except Exception as e:
            result = json.dumps({"error":True,"message":str(e)})
    else:
        try:
            result = ts.day_cinema(day).to_json()
        except Exception as e:
            # str(e): exception objects have no `.message` attribute on
            # Python 3, so the old `e.message` raised AttributeError here.
            result = json.dumps({"error":True,
                "message":"can not get the data, format date as YYYY-M-D,error:{error}".format(error=str(e))})
    return result
class DayCinema(Resource):
    """Nationwide per-cinema box office ranking for a single day.

    Returns the previous day's data by default; pass a date parameter to
    fetch a specific day.

    Query parameters:
        date: date (YYYY-MM-DD); defaults to the previous day.

    Response fields:
        Attendance          seat occupancy rate
        AvgPeople           average attendance per screening
        CinemaName          cinema name
        RowNum              rank
        TodayAudienceCount  audience count for the day
        TodayBox            box office for the day
        TodayShowCount      screenings for the day
        price               average ticket price (CNY)
    """

    def get(self):
        # `date` may be absent (None), in which case the previous day is used.
        return get_day_cinema(request.args.get("date"))
| {
"repo_name": "FinaceInfo/Chinese-box-office-info",
"path": "api/boxoffice/day_cinema.py",
"copies": "1",
"size": "1177",
"license": "mit",
"hash": 318509471340882300,
"line_mean": 23.825,
"line_max": 109,
"alpha_frac": 0.609264854,
"autogenerated": false,
"ratio": 2.464019851116625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8542368254609406,
"avg_score": 0.006183290101443842,
"num_lines": 40
} |
__all__ = ["DeclHandler", "LexicalHandler", 'all_features', 'all_properties', 'feature_external_ges', 'feature_external_pes', 'feature_namespace_prefixes', 'feature_namespaces', 'feature_string_interning', 'feature_validation', 'property_declaration_handler', 'property_dom_node', 'property_encoding', 'property_interning_dict', 'property_lexical_handler', 'property_xml_string']
#Snipped from xml.sax.saxlib
"""
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id: pyxml_standins.py,v 1.2 2004/12/30 06:13:45 uogbuji Exp $
"""
# A number of interfaces used to live in saxlib, but are now in
# various other modules for Python 2 compatibility. If nobody uses
# them here any longer, the references can be removed
from xml.sax.handler import ErrorHandler, ContentHandler, DTDHandler, EntityResolver
from xml.sax.xmlreader import XMLReader, InputSource, Locator, IncrementalParser
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== DECLHANDLER =====
class DeclHandler:
    """Optional SAX2 handler for DTD declaration events.

    Note that some DTD declarations are already reported through the
    DTDHandler interface. All events reported to this handler will
    occur between the startDTD and endDTD events of the
    LexicalHandler.

    To set the DeclHandler for an XMLReader, use the setProperty method
    with the identifier http://xml.org/sax/handlers/DeclHandler."""

    # The method bodies below are deliberately empty (docstring-only):
    # this class documents the interface, and drivers/applications
    # override only the callbacks they need.

    def attributeDecl(self, elem_name, attr_name, type, value_def, value):
        """Report an attribute type declaration.

        Only the first declaration will be reported. The type will be
        one of the strings "CDATA", "ID", "IDREF", "IDREFS",
        "NMTOKEN", "NMTOKENS", "ENTITY", "ENTITIES", or "NOTATION", or
        a list of names (in the case of enumerated definitions).

        elem_name is the element type name, attr_name the attribute
        type name, type a string representing the attribute type,
        value_def a string representing the default declaration
        ('#IMPLIED', '#REQUIRED', '#FIXED' or None). value is a string
        representing the attribute's default value, or None if there
        is none."""

    def elementDecl(self, elem_name, content_model):
        """Report an element type declaration.

        Only the first declaration will be reported.

        content_model is the string 'EMPTY', the string 'ANY' or the content
        model structure represented as tuple (separator, tokens, modifier)
        where separator is the separator in the token list (that is, '|' or
        ','), tokens is the list of tokens (element type names or tuples
        representing parentheses) and modifier is the quantity modifier
        ('*', '?' or '+')."""

    def internalEntityDecl(self, name, value):
        """Report an internal entity declaration.

        Only the first declaration of an entity will be reported.

        name is the name of the entity. If it is a parameter entity,
        the name will begin with '%'. value is the replacement text of
        the entity."""

    def externalEntityDecl(self, name, public_id, system_id):
        """Report a parsed entity declaration. (Unparsed entities are
        reported to the DTDHandler.)

        Only the first declaration for each entity will be reported.

        name is the name of the entity. If it is a parameter entity,
        the name will begin with '%'. public_id and system_id are the
        public and system identifiers of the entity. public_id will be
        None if none were declared."""
# ===== LEXICALHANDLER =====
class LexicalHandler:
    """Optional SAX2 handler for lexical events.

    This handler is used to obtain lexical information about an XML
    document, that is, information about how the document was encoded
    (as opposed to what it contains, which is reported to the
    ContentHandler), such as comments and CDATA marked section
    boundaries.

    To set the LexicalHandler of an XMLReader, use the setProperty
    method with the property identifier
    'http://xml.org/sax/handlers/LexicalHandler'. There is no
    guarantee that the XMLReader will support or recognize this
    property."""

    # The method bodies below are deliberately empty (docstring-only):
    # this class documents the interface, and drivers/applications
    # override only the callbacks they need.

    def comment(self, content):
        """Reports a comment anywhere in the document (including the
        DTD and outside the document element).

        content is a string that holds the contents of the comment."""

    def startDTD(self, name, public_id, system_id):
        """Report the start of the DTD declarations, if the document
        has an associated DTD.

        A startEntity event will be reported before declaration events
        from the external DTD subset are reported, and this can be
        used to infer from which subset DTD declarations derive.

        name is the name of the document element type, public_id the
        public identifier of the DTD (or None if none were supplied)
        and system_id the system identfier of the external subset (or
        None if none were supplied)."""

    def endDTD(self):
        "Signals the end of DTD declarations."

    def startEntity(self, name):
        """Report the beginning of an entity.

        The start and end of the document entity is not reported. The
        start and end of the external DTD subset is reported with the
        pseudo-name '[dtd]'.

        Skipped entities will be reported through the skippedEntity
        event of the ContentHandler rather than through this event.

        name is the name of the entity. If it is a parameter entity,
        the name will begin with '%'."""

    def endEntity(self, name):
        """Reports the end of an entity. name is the name of the
        entity, and follows the same conventions as for
        startEntity."""

    def startCDATA(self):
        """Reports the beginning of a CDATA marked section.

        The contents of the CDATA marked section will be reported
        through the characters event."""

    def endCDATA(self):
        "Reports the end of a CDATA marked section."
#
#From xml.sax.handler
#
#============================================================================
#
# CORE FEATURES
#
#============================================================================
feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
#        (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write

feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
#       declarations.
# false: Do not report attributes used for Namespace declarations, and
#        optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write

feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
#       local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing) read-only; (not parsing) read/write

feature_validation = "http://xml.org/sax/features/validation"
# true: Report all validation errors (implies external-general-entities and
#       external-parameter-entities).
# false: Do not report validation errors.
# access: (parsing) read-only; (not parsing) read/write

feature_external_ges = "http://xml.org/sax/features/external-general-entities"
# true: Include all external general (text) entities.
# false: Do not include external general entities.
# access: (parsing) read-only; (not parsing) read/write

feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
# true: Include all external parameter entities, including the external
#       DTD subset.
# false: Do not include any external parameter entities, even the external
#        DTD subset.
# access: (parsing) read-only; (not parsing) read/write

all_features = [feature_namespaces,
                feature_namespace_prefixes,
                feature_string_interning,
                feature_validation,
                feature_external_ges,
                feature_external_pes]

#============================================================================
#
# CORE PROPERTIES
#
#============================================================================

property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
# data type: xml.sax.sax2lib.LexicalHandler
# description: An optional extension handler for lexical events like comments.
# access: read/write

property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
# data type: xml.sax.sax2lib.DeclHandler
# description: An optional extension handler for DTD-related events other
#              than notations and unparsed entities.
# access: read/write

property_dom_node = "http://xml.org/sax/properties/dom-node"
# data type: org.w3c.dom.Node
# description: When parsing, the current DOM node being visited if this is
#              a DOM iterator; when not parsing, the root DOM node for
#              iteration.
# access: (parsing) read-only; (not parsing) read/write

property_xml_string = "http://xml.org/sax/properties/xml-string"
# data type: String
# description: The literal string of characters that was the source for
#              the current event.
# access: read-only

property_encoding = "http://www.python.org/sax/properties/encoding"
# data type: String
# description: The name of the encoding to assume for input data.
# access: write: set the encoding, e.g. established by a higher-level
#                protocol. May change during parsing (e.g. after
#                processing a META tag)
#         read:  return the current encoding (possibly established through
#                auto-detection.
# initial value: UTF-8
#

property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
# data type: Dictionary
# description: The dictionary used to intern common strings in the document
# access: write: Request that the parser uses a specific dictionary, to
#                allow interning across different documents
#         read:  return the current interning dictionary, or None
#

all_properties = [property_lexical_handler,
                  property_dom_node,
                  property_declaration_handler,
                  property_xml_string,
                  property_encoding,
                  property_interning_dict]
| {
"repo_name": "AmericanResearchInstitute/poweru-server",
"path": "cmis_storage/amara/pyxml_standins.py",
"copies": "1",
"size": "10950",
"license": "bsd-3-clause",
"hash": -7639086978039671000,
"line_mean": 40.0112359551,
"line_max": 379,
"alpha_frac": 0.6709589041,
"autogenerated": false,
"ratio": 4.366028708133971,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004887171418283165,
"num_lines": 267
} |
__all__ = ["Decoder"]
import struct
from array import array
import pyconspack.header as H
import pyconspack.types as T
import pyconspack.error as E
import pyconspack.index as I
class ForwardRef:
    """Placeholder for a reference whose target has not been decoded yet.

    `set` records where the placeholder was stored; `replace` later swaps
    the placeholder for the real value once it becomes available.
    """

    def __init__(self, tag):
        self.tag = tag

    def set(self, place, index=None, is_key=False):
        # Remember the container, the position within it, and whether the
        # placeholder sits in key position of a dict.
        self.place = place
        self.index = index
        self.is_key = is_key

    def replace(self, value):
        # Note that FREF=FREF doesn't really work... but a dict()
        # can't currently be a key anyway, so stupidity prevents this
        # from really being used at all.
        if not self.is_key:
            self.place[self.index] = value
        else:
            # Re-key the dict entry: drop the placeholder key, keep its value.
            self.place[value] = self.place.pop(self)
class Decoder:
    """Decode conspack-encoded data from a binary stream.

    Keyword options (``opt``): ``index`` (an index mapping), ``decoders``
    (per-instance TMAP decoders), ``rref_decoder`` and ``pointer_decoder``
    (callables wrapping remote refs / pointers).
    """

    def __init__(self, **kw):
        self.opt = kw
        self.frefs = dict()   # tag -> list of ForwardRefs awaiting a value
        self.tags = dict()    # tag -> already-decoded object
        self.index = self._opt('index')
        if(self.index):
            self.index = I.Index(self.index)

    def read_header(self, f):
        """Read and return the next header byte."""
        return f.read(1)[0]

    def read_size(self, h, f):
        """Read a big-endian unsigned size; its byte width is encoded in `h`."""
        size_bytes = h & H.SIZE_MASK
        if  (size_bytes == 0): return struct.unpack('B', f.read(1))[0]
        elif(size_bytes == 1): return struct.unpack('>H', f.read(2))[0]
        elif(size_bytes == 2): return struct.unpack('>I', f.read(4))[0]
        elif(size_bytes == 3): return struct.unpack('>Q', f.read(8))[0]

    def _opt(self, name):
        """Return option `name` if present (falsy when absent)."""
        return (name in self.opt) and (self.opt[name])

    def _push_fref(self, tag):
        """Create and register a ForwardRef for a not-yet-seen tag."""
        fref = ForwardRef(tag)
        if(tag in self.frefs):
            self.frefs[tag].append(fref)
        else:
            self.frefs[tag] = [fref]
        return fref

    def _maybe_fref(self, val, place, index=None, is_key=False):
        """If `val` is a ForwardRef, record where it was stored for later fixup."""
        if(not type(val) is ForwardRef):
            return
        val.set(place, index, is_key)

    def decode(self, f):
        """Decode a single value from stream `f`."""
        return self._decode(f)

    def _decode(self, f, fixed=None):
        """Decode one value; `fixed` optionally supplies a pre-read header."""
        if(not fixed):
            h = self.read_header(f)
        else:
            h = fixed
        if (H.is_bool(h)): return self.decode_bool(h)
        elif(H.is_number(h)): return self.decode_number(h, f)
        elif(H.is_index(h)): return self.decode_index(h, f)
        elif(H.is_container(h)): return self.decode_container(h, f)
        elif(H.is_cons(h)): return self.decode_cons(h, f)
        elif(H.is_string(h)): return self.decode_string(h, f)
        elif(H.is_character(h)): return self.decode_character(h, f)
        elif(H.is_rref(h)): return self.decode_rref(h, f)
        elif(H.is_pointer(h)): return self.decode_pointer(h, f)
        elif(H.is_package(h)): return self.decode_package(h, f)
        elif(H.is_symbol(h)): return self.decode_symbol(h, f)
        elif(H.is_tag(h)): return self.decode_tag(h, f)
        elif(H.is_ref(h)): return self.decode_ref(h, f)
        else:
            raise E.BadHeader("Bad header byte: 0b{h:08b}".format(h=h))

    def decode_bool(self, h):
        """Header 0x00 is the empty tuple (NIL); any other bool header is True."""
        if(h == 0x00): return ()
        else: return True

    def decode_n(self, f, c):
        """Read `c` bytes big-endian into an unsigned integer."""
        n = 0
        for i in range(c):
            n <<= 8
            n |= f.read(1)[0]
        return n

    def decode_number(self, h, f):
        """Decode a number; fixed-width formats go through struct."""
        c, fmt = H.fixed_type_fmt(h)
        if(fmt): return struct.unpack('>'+fmt, f.read(c))[0]
        elif(h == H.INT128):
            # BUGFIX: previously compared an undefined name `t` (NameError);
            # the header byte itself identifies the type.
            n = self.decode_n(f, 16)
            if(n >= 2**127):
                # BUGFIX: was `>`, which left the most negative value
                # (raw value exactly 2**127) undecoded as a positive number.
                n -= 2**128
            return n
        elif(h == H.UINT128):
            return self.decode_n(f, 16)

    def decode_container(self, h, f):
        """Dispatch on the container type bits."""
        t = h & H.CONTAINER_TYPE_MASK
        if (t == H.CONTAINER_VECTOR): return self.decode_vector(h, f)
        elif(t == H.CONTAINER_LIST): return self.decode_list(h, f)
        elif(t == H.CONTAINER_MAP): return self.decode_map(h, f)
        elif(t == H.CONTAINER_TMAP): return self.decode_map(h, f)

    def decode_list(self, h, f):
        """Decode a (possibly dotted) list of `size` elements."""
        size = self.read_size(h, f)
        fixed = None
        if(h & H.CONTAINER_FIXED):
            fixed = f.read(1)[0]
        l = []
        for i in range(size-1):
            val = self._decode(f, fixed)
            l.append(val)
            self._maybe_fref(val, l, i)
        final = self._decode(f, fixed)
        if(final == () or
           not (h & H.CONTAINER_TYPE_MASK) == H.CONTAINER_LIST):
            # NIL terminator (or non-list container): a proper list.
            return l
        else:
            l = T.DottedList(l)
            l.append(final)
            self._maybe_fref(final, l, len(l)-1)
            # BUGFIX: return the DottedList we just built.  Re-wrapping it in
            # another T.DottedList returned a copy, so ForwardRef fixups
            # registered against `l` never reached the returned object.
            return l

    def decode_vector(self, h, f):
        """Decode a vector; fixed-format vectors become array.array."""
        if(not (h & H.CONTAINER_FIXED)):
            # BUGFIX: decode_list is a bound method; `self` was passed twice,
            # raising TypeError for every non-fixed vector.
            return T.Vector(self.decode_list(h, f))
        size = self.read_size(h, f)
        fixed = f.read(1)[0]
        c, fmt = H.fixed_type_fmt(fixed)
        a = array(fmt)
        for i in range(size):
            val = self._decode(f, fixed)
            a.append(val)
            self._maybe_fref(val, a, i)
        return a

    def decode_map(self, h, f):
        """Decode a map; typed maps (TMAP) are routed through a decoder."""
        size = self.read_size(h, f)
        fixed = None
        if(h & H.CONTAINER_FIXED):
            fixed = self.read_header(f)
        tmap_type = None
        if((h & H.CONTAINER_TYPE_MASK) == H.CONTAINER_TMAP):
            tmap_type = self._decode(f)
        d = dict()
        for i in range(size):
            k = self._decode(f, fixed)
            v = self._decode(f)
            self._maybe_fref(k, d, is_key=True)
            self._maybe_fref(v, d, k)
            d[k] = v
        if(tmap_type):
            # Prefer per-instance decoders; fall back to the class registry.
            decoders = self._opt('decoders')
            if(not decoders or tmap_type not in decoders):
                if(tmap_type in Decoder.class_decoders):
                    decoders = Decoder.class_decoders
                else:
                    raise E.NoDecoder("Decoder for {t} not found".format(t=tmap_type))
            return decoders[tmap_type](d)
        else:
            return d

    def decode_string(self, h, f):
        """Decode a UTF-8 string of `size` bytes."""
        size = self.read_size(h, f)
        return f.read(size).decode(encoding='utf-8', errors='strict')

    def decode_character(self, h, f):
        """Decode a single UTF-8 character (byte length is in the header)."""
        size = h & H.SIZE_MASK
        return f.read(size).decode(encoding='utf-8', errors='strict')

    def decode_package(self, h, f):
        """Decode a package designator."""
        name = self._decode(f)
        return T.package(name)

    def decode_symbol(self, h, f):
        """Decode a symbol, interning it in its package (KEYWORD if flagged)."""
        name = self._decode(f)
        package = None
        if(H.is_keyword(h)):
            package = "KEYWORD"
        else:
            package = self._decode(f)
        return T.intern(name, package)

    def decode_rref(self, h, f):
        """Decode a remote reference, through `rref_decoder` when provided."""
        decoder = self._opt('rref_decoder')
        rref = self._decode(f)
        if(decoder):
            return decoder(rref)
        else:
            return T.RRef(rref)

    def decode_pointer(self, h, f):
        """Decode a pointer, through `pointer_decoder` when provided."""
        decoder = self._opt('pointer_decoder')
        val = self.read_size(h, f)
        if(decoder):
            return decoder(val)
        else:
            return T.Pointer(val)

    def decode_cons(self, h, f):
        """Decode a cons cell; a NIL cdr yields a one-element list."""
        car = self._decode(f)
        cdr = self._decode(f)
        if(not cdr):
            return [car]
        else:
            return T.DottedList([car,cdr])

    def decode_tag(self, h, f):
        """Decode a tagged value, resolving any ForwardRefs waiting on it."""
        tag = None
        if(h & H.REFTAG_INLINE):
            tag = h & H.REFTAG_INLINE_VALUE
        else:
            tag = self.read_size(h, f)
        ob = self._decode(f)
        self.tags[tag] = ob
        if(tag in self.frefs):
            self.replace_frefs(tag, ob)
        return ob

    def decode_ref(self, h, f):
        """Decode a reference to a tag, creating a ForwardRef if unseen."""
        tag = None
        if(h & H.REFTAG_INLINE):
            tag = h & H.REFTAG_INLINE_VALUE
        else:
            tag = self.read_size(h, f)
        if(tag in self.tags):
            return self.tags[tag]
        return self._push_fref(tag)

    def replace_frefs(self, tag, val):
        """Substitute `val` into every place holding a ForwardRef to `tag`."""
        for f in self.frefs[tag]:
            f.replace(val)

    def decode_index(self, h, f):
        """Decode an indexed value, resolving through the index when set."""
        val = None
        if(h & H.REFTAG_INLINE):
            val = h & H.REFTAG_INLINE_VALUE
        else:
            # BUGFIX: the non-inline size was stored into `tag`, leaving
            # `val` as None for every non-inline index.
            val = self.read_size(h, f)
        if(self.index):
            return self.index.index(val) or T.Index(val)
        else:
            return T.Index(val)

    # Class-wide registry of TMAP decoders, shared by all Decoder instances.
    class_decoders = dict()
def register(symbol, func):
    """Globally register `func` as the decoder for TMAPs typed `symbol`."""
    Decoder.class_decoders.update({symbol: func})
def deregister(symbol):
    """Remove the globally registered decoder for `symbol` (KeyError if absent)."""
    Decoder.class_decoders.pop(symbol)
| {
"repo_name": "conspack/pyconspack",
"path": "decode.py",
"copies": "1",
"size": "8451",
"license": "bsd-2-clause",
"hash": 7268655817434544000,
"line_mean": 27.7448979592,
"line_max": 86,
"alpha_frac": 0.5131937049,
"autogenerated": false,
"ratio": 3.298594847775176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9281058654548429,
"avg_score": 0.006145979625349561,
"num_lines": 294
} |
__all__ = ['dedupe', 'partition', 'chunk_iterable']
import itertools
def dedupe(items, key=None):
    """Yield items with duplicates removed, preserving first-seen order.

    :type items: iterable
    :param items: Iterable of items.
    :type key: function
    :param key: A key function that takes an item as argument.
    :rtype: iterator
    :returns: Deduplicated, order-preserving iterable of items.
    """
    seen = set()
    for item in items:
        marker = item if key is None else key(item)
        if marker in seen:
            continue
        seen.add(marker)
        yield item
def partition(iterable, k):
    """Partition a finite iterable into k almost-equal parts, round-robin.

    :type iterable: iterable
    :param iterable: A finite iterable.
    :type k: int
    :param k: Number of partitions.
    :rtype: list
    :returns: List of lists of partitioned iterable.
    """
    buckets = []
    for slot, element in zip(itertools.cycle(range(k)), iterable):
        # Buckets are created lazily, so fewer than k items yield fewer lists.
        if slot == len(buckets):
            buckets.append([])
        buckets[slot].append(element)
    return buckets
def chunk_iterable(iterable, size):
    """Yield successive tuples of at most `size` elements from `iterable`.

    :type iterable: iterable
    :param iterable: Iterable to be chunked.
    :type size: int
    :param size: Number of elements per chunk.
    :rtype: iterator
    :returns: An iterator of chunk tuples (last one may be shorter).
    """
    iterator = iter(iterable)
    while True:
        piece = tuple(itertools.islice(iterator, size))
        if not piece:
            return
        yield piece
| {
"repo_name": "hans-t/autos",
"path": "autos/utils/iterable.py",
"copies": "1",
"size": "1605",
"license": "mit",
"hash": 432701877761510400,
"line_mean": 22.9552238806,
"line_max": 83,
"alpha_frac": 0.6342679128,
"autogenerated": false,
"ratio": 4.158031088082901,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015103056147832267,
"num_lines": 67
} |
__all__ = ["Definition", "find_file"]
class Definition(object):
    '''
    All Definitions should inherit this.

    For a given subject and session within the API, the definition is used
    to create a given mask, map, etc.

    Definitions have an init function which the users uses to specify
    how they want the definition to behave.

    The find_path and get_for_subses functions are called by the AFQ API.
    The api calls find_path to let the definition find relevant files
    for the given subject and session. The api calls get_for_subses to get the
    mask, map, etc.
    '''

    def __init__(self):
        raise NotImplementedError("Please implement an __init__ method")

    def find_path(self, bids_layout, from_path, subject, session):
        raise NotImplementedError("Please implement a find_path method")

    def str_for_toml(self):
        """
        Uses __init__ in str_for_toml to make string that will instantiate
        itself. Assumes object will have attributes of same name as
        __init__ args. This is important for reading/writing definitions
        as arguments to config files.
        """
        # BUGFIX: co_varnames lists parameters FIRST, then local variables of
        # __init__; slice to co_argcount so locals are not mistaken for
        # arguments (which caused spurious getattr lookups).
        init_code = self.__init__.__code__
        arg_names = init_code.co_varnames[:init_code.co_argcount]
        return type(self).__name__\
            + "("\
            + _arglist_to_string(arg_names, get_attr=self)\
            + ')'
def _arglist_to_string(args, get_attr=None):
    '''
    Helper function

    Unfold a list of arguments into a comma-separated string.
    If get_attr is not None, each argument name is first replaced by the
    corresponding attribute of get_attr.
    '''
    rendered = []
    for arg in args:
        if arg == "self":
            continue
        if get_attr is not None:
            arg = getattr(get_attr, arg)
        # Format by type: nested Definitions recurse through str_for_toml,
        # strings get quoted, lists recurse element-wise.
        if isinstance(arg, Definition):
            rendered.append(arg.str_for_toml())
        elif isinstance(arg, str):
            rendered.append(f"\"{arg}\"")
        elif isinstance(arg, list):
            rendered.append("[" + _arglist_to_string(arg) + "]")
        else:
            rendered.append(str(arg))
    return ', '.join(rendered)
def find_file(bids_layout, path, filters, suffix, session, subject,
              extension=".nii.gz"):
    """
    Helper function

    Generic calls to get_nearest to find a file
    """
    common = dict(
        extension=extension,
        suffix=suffix,
        subject=subject,
        full_search=True,
        strict=False,
    )

    # First, try to match the session.
    nearest = bids_layout.get_nearest(
        path, **filters, session=session, **common)

    if nearest is None:
        # If that fails, loosen the session restriction.
        nearest = bids_layout.get_nearest(path, **filters, **common)
        # Sanity check: the fallback must not cross subjects.
        path_subject = bids_layout.parse_file_entities(path).get(
            "subject", None)
        file_subject = bids_layout.parse_file_entities(nearest).get(
            "subject", None)
        if path_subject != file_subject:
            raise ValueError(
                f"Expected subject IDs to match for the retrieved mask file "
                f"and the supplied `from_path` file. Got sub-{file_subject} "
                f"from mask file {nearest} and sub-{path_subject} "
                f"from `from_path` file {path}."
            )

    return nearest
| {
"repo_name": "yeatmanlab/pyAFQ",
"path": "AFQ/definitions/utils.py",
"copies": "2",
"size": "3425",
"license": "bsd-2-clause",
"hash": -2463183909907093000,
"line_mean": 31.0093457944,
"line_max": 78,
"alpha_frac": 0.583649635,
"autogenerated": false,
"ratio": 4.136473429951691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5720123064951691,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DelimitedTextReader', 'DelimitedPointsReaderBase', 'XYZTextReader']
__displayname__ = 'Delimited File I/O'
import sys
import numpy as np
import pandas as pd
from .. import _helpers, interface
from ..base import ReaderBase
if sys.version_info < (3,):
from StringIO import StringIO
else:
from io import StringIO
class DelimitedTextReader(ReaderBase):
    """This reader will take in any delimited text file and make a ``vtkTable``
    from it. This is not much different than the default .txt or .csv reader in
    ParaView, however it gives us room to use our own extensions and a little
    more flexibility in the structure of the files we import.
    """
    __displayname__ = 'Delimited Text Reader'
    __category__ = 'reader'
    # Space-separated list of file extensions this reader claims in ParaView
    extensions = 'dat csv txt text ascii xyz tsv ntab'
    description = 'PVGeo: Delimited Text Files'
    def __init__(self, nOutputPorts=1, outputType='vtkTable', **kwargs):
        ReaderBase.__init__(
            self, nOutputPorts=nOutputPorts, outputType=outputType, **kwargs
        )
        # Parameters to control the file read:
        # - if these are set/changed, we must reperform the read
        self.__delimiter = kwargs.get('delimiter', ' ')
        self.__use_tab = kwargs.get('use_tab', False)
        self.__skipRows = kwargs.get('skiprows', 0)
        self.__comments = kwargs.get('comments', '!')
        self.__has_titles = kwargs.get('has_titles', True)
        # Data objects to hold the read data for access by the pipeline methods
        self._data = []  # one pandas DataFrame per file/timestep
        self._titles = []  # column titles shared by all timesteps
    def _get_delimiter(self):
        """Internal use only: returns the delimiter string, or ``None`` to
        split on any whitespace."""
        if self.__use_tab:
            return None
        return self.__delimiter
    def get_split_on_white_space(self):
        """Returns the status of how the delimiter interprets whitespace"""
        return self.__use_tab
    #### Methods for performing the read ####
    def _get_file_contents(self, idx=None):
        """This grabs the lines of the input data file as a string array. This
        allows us to load the file contents, parse the header then use numpy or
        pandas to parse the data.

        Args:
            idx (int, optional): if given, read only the file at this
                timestep index; otherwise read all files.
        """
        if idx is not None:
            filenames = [self.get_file_names(idx=idx)]
        else:
            filenames = self.get_file_names()
        contents = []
        for f in filenames:
            try:
                # Read whole lines as strings, honoring the comment
                # character and dropping the first `skiprows` lines.
                contents.append(
                    np.genfromtxt(
                        f, dtype=str, delimiter='\n', comments=self.__comments
                    )[self.__skipRows : :]
                )
            except (IOError, OSError) as fe:
                raise _helpers.PVGeoError(str(fe))
        if idx is not None:
            return contents[0]
        return contents
    def _extract_header(self, content):
        """Override this. Removes header from single file's content and
        returns ``(titles, remaining_lines)``."""
        if len(np.shape(content)) > 2:
            raise _helpers.PVGeoError(
                "`_extract_header()` can only handle a sigle file's content"
            )
        idx = 0
        if self.__has_titles:
            titles = content[idx].split(self._get_delimiter())
            idx += 1
        else:
            # No header row: fabricate generic column names
            cols = len(content[idx].split(self._get_delimiter()))
            titles = []
            for i in range(cols):
                titles.append('Field %d' % i)
        return titles, content[idx::]
    def _extract_headers(self, contents):
        """Should NOT be overridden. This is a convenience method to iteratively
        extract the header of every file. You should override
        ``_extract_header``.
        """
        ts = []
        for i, c in enumerate(contents):
            titles, newcontent = self._extract_header(c)
            contents[i] = newcontent
            ts.append(titles)
        # Check that the titles are the same across files:
        ts = np.unique(np.asarray(ts), axis=0)
        if len(ts) > 1:
            raise _helpers.PVGeoError(
                'Data array titles varied across file timesteps. This data is invalid as a timeseries.'
            )
        return ts[0], contents
    def _file_contents_to_data_frame(self, contents):
        """Should NOT need to be overridden. After ``_extract_headers`` handles
        removing the file header from the file contents, this method will parse
        the remainder of the contents into a pandas DataFrame with column names
        generated from the titles resulting from in ``_extract_headers``.
        """
        data = []
        for content in contents:
            if self.get_split_on_white_space():
                # Tab/whitespace mode: let pandas split on any whitespace
                df = pd.read_csv(
                    StringIO("\n".join(content)),
                    names=self.get_titles(),
                    delim_whitespace=self.get_split_on_white_space(),
                )
            else:
                df = pd.read_csv(
                    StringIO("\n".join(content)),
                    names=self.get_titles(),
                    sep=self._get_delimiter(),
                )
            data.append(df)
        return data
    def _read_up_front(self):
        """Should not need to be overridden. Reads all files once and caches
        the parsed DataFrames for the pipeline methods."""
        # Perform Read
        contents = self._get_file_contents()
        self._titles, contents = self._extract_headers(contents)
        self._data = self._file_contents_to_data_frame(contents)
        self.need_to_read(flag=False)
        return 1
    #### Methods for accessing the data read in #####
    def _get_raw_data(self, idx=0):
        """This will return the proper data for the given timestep as a dataframe"""
        return self._data[idx]
    #### Algorithm Methods ####
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to get data for current timestep and populate the
        output data object.
        """
        # Get output:
        output = self.GetOutputData(outInfo, 0)
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        if self.need_to_read():
            self._read_up_front()
        # Generate the data object
        interface.data_frame_to_table(self._get_raw_data(idx=i), output)
        return 1
    #### Seters and Geters ####
    def set_delimiter(self, deli):
        """The input file's delimiter. To use a tab delimiter please use
        ``set_split_on_white_space()``
        Args:
            deli (str): a string delimiter/seperator
        """
        if deli != self.__delimiter:
            self.__delimiter = deli
            self.Modified()
    def set_split_on_white_space(self, flag):
        """Set a boolean flag to override the ``set_delimiter()`` and use any
        white space as a delimiter.
        """
        if flag != self.__use_tab:
            self.__use_tab = flag
            self.Modified()
    def set_skip_rows(self, skip):
        """Set the integer number of rows to skip at the top of the file."""
        if skip != self.__skipRows:
            self.__skipRows = skip
            self.Modified()
    def get_skip_rows(self):
        """Get the integer number of rows to skip at the top of the file."""
        return self.__skipRows
    def set_comments(self, identifier):
        """The character identifier for comments within the file."""
        if identifier != self.__comments:
            self.__comments = identifier
            self.Modified()
    def set_has_titles(self, flag):
        """Set the boolean for if the delimited file has header titles for the
        data arrays.
        """
        if self.__has_titles != flag:
            self.__has_titles = flag
            self.Modified()
    def has_titles(self):
        """Get the boolean for if the delimited file has header titles for the
        data arrays.
        """
        return self.__has_titles
    def get_titles(self):
        """Get the list of column titles parsed from the file header."""
        return self._titles
###############################################################################
class DelimitedPointsReaderBase(DelimitedTextReader):
    """Base class for delimited text readers whose output is a
    ``vtkPolyData`` point set rather than a ``vtkTable``.
    """

    __displayname__ = 'Delimited Points Reader Base'
    __category__ = 'base'
    # File extensions are inherited from DelimitedTextReader.
    description = 'PVGeo: Delimited Points'  # subclasses should override

    def __init__(self, **kwargs):
        DelimitedTextReader.__init__(self, outputType='vtkPolyData', **kwargs)
        self.__copy_z = kwargs.get('copy_z', False)

    def set_copy_z(self, flag):
        """Toggle copying of the points' Z-component into the point data."""
        if flag != self.__copy_z:
            self.__copy_z = flag
            self.Modified()

    def get_copy_z(self):
        """Return whether the points' Z-component is copied to the point
        data."""
        return self.__copy_z

    #### Algorithm Methods ####

    def RequestData(self, request, inInfo, outInfo):
        """Pipeline callback: build the ``vtkPolyData`` output for the
        requested timestep.
        """
        output = self.GetOutputData(outInfo, 0)
        # Which timestep is the pipeline asking for?
        timestep = _helpers.get_requested_time(self, outInfo)
        if self.need_to_read():
            self._read_up_front()
        # Convert that timestep's DataFrame into a point set.
        frame = self._get_raw_data(idx=timestep)
        poly = interface.points_to_poly_data(frame, copy_z=self.get_copy_z())
        output.DeepCopy(poly)
        return 1
###############################################################################
class XYZTextReader(DelimitedTextReader):
    """Reader for XYZ-style files whose header row is comma separated
    while the data rows use the regular delimiter.
    """

    __displayname__ = 'XYZ Text Reader'
    __category__ = 'reader'
    # File extensions are inherited from DelimitedTextReader.
    description = 'PVGeo: XYZ Delimited Text Files where header has comma delimiter.'

    def __init__(self, **kwargs):
        DelimitedTextReader.__init__(self, **kwargs)
        self.set_comments(kwargs.get('comments', '#'))

    def _extract_header(self, content):
        """Split the comma-delimited header row into titles; the data
        rows are returned untouched."""
        # Drop the two-character comment prefix (e.g. '! ') before splitting.
        header = content[0][2::]
        return header.split(', '), content[1::]
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/readers/delimited.py",
"copies": "1",
"size": "10331",
"license": "bsd-3-clause",
"hash": -7214485528681268000,
"line_mean": 34.2593856655,
"line_max": 103,
"alpha_frac": 0.5754525215,
"autogenerated": false,
"ratio": 4.299209321681232,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5374661843181232,
"avg_score": null,
"num_lines": null
} |
__all__ = ["denoise_image"]
from .. import utils
from . import process_args as pargs
from .get_mask import get_mask
def denoise_image(
    image, mask=None, shrink_factor=1, p=1, r=3, noise_model="Rician", v=0
):
    """
    Denoise an image using a spatially adaptive filter originally described in
    J. V. Manjon, P. Coupe, Luis Marti-Bonmati, D. L. Collins, and M. Robles.
    Adaptive Non-Local Means Denoising of MR Images With Spatially Varying
    Noise Levels, Journal of Magnetic Resonance Imaging, 31:192-203, June 2010.

    ANTsR function: `denoiseImage`

    Arguments
    ---------
    image : ANTsImage
        scalar image to denoise.

    mask : ANTsImage
        to limit the denoise region.

    shrink_factor : scalar
        downsampling level performed within the algorithm.

    p : integer or character of format '2x2' where the x separates vector entries
        patch radius for local sample.

    r : integer or character of format '2x2' where the x separates vector entries
        search radius from which to choose extra local samples.

    noise_model : string
        'Rician' or 'Gaussian'

    v : integer
        verbosity flag forwarded to the underlying ANTs call.

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> import numpy as np
    >>> image = ants.image_read(ants.get_ants_data('r16'))
    >>> # add fairly large salt and pepper noise
    >>> imagenoise = image + np.random.randn(*image.shape).astype('float32')*5
    >>> imagedenoise = ants.denoise_image(imagenoise, ants.get_mask(image))
    """
    original_type = image.pixeltype
    # Run the filter in float precision; cast back to the input type at the end.
    result = image.clone("float")
    # Build the CLI-style argument mapping for the ANTs DenoiseImage backend.
    # Insertion order matches the historical call exactly: the optional mask
    # ("x") slots in between the noise model and the shrink factor.
    myargs = {
        "d": image.dimension,
        "i": image,
        "n": noise_model,
    }
    if mask is not None:
        myargs["x"] = mask.clone("unsigned char")
    myargs.update(
        {
            "s": int(shrink_factor),
            "p": p,
            "r": r,
            "o": result,
            "v": v,
        }
    )
    processed_args = pargs._int_antsProcessArguments(myargs)
    libfn = utils.get_lib_fn("DenoiseImage")
    libfn(processed_args)
    return result.clone(original_type)
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/utils/denoise_image.py",
"copies": "1",
"size": "2321",
"license": "apache-2.0",
"hash": -3427855477427090000,
"line_mean": 26.630952381,
"line_max": 81,
"alpha_frac": 0.5777682034,
"autogenerated": false,
"ratio": 3.4902255639097746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4567993767309775,
"avg_score": null,
"num_lines": null
} |
#__all__ = ['deque', 'defaultdict', 'Counter']
from _collections import deque, defaultdict
#from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
# fixme brython.. there is an issue with _abcoll
#from _abcoll import *
#from _abcoll import Set
from _abcoll import MutableMapping
#import _abcoll
#__all__ += _abcoll.__all__
from collections.abc import *
import collections.abc
__all__ += collections.abc.__all__
from _collections import deque, defaultdict, namedtuple
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
#fixme brython
#from weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
class Set(set):
    # Compatibility shim for Brython: redefines ``Set`` as a concrete
    # subclass of the builtin set, shadowing the abstract ``Set`` pulled
    # in by ``from collections.abc import *`` above.
    pass
class Sequence(list):
    # Compatibility shim for Brython: redefines ``Sequence`` as a concrete
    # subclass of the builtin list, shadowing the abstract ``Sequence``
    # pulled in by ``from collections.abc import *`` above.
    pass
def _proxy(obj):
    # Stand-in for ``weakref.proxy`` (see the commented-out weakref import
    # above): returns the object itself, so "proxies" here are actually
    # strong references.
    return obj
################################################################################
### OrderedDict
################################################################################
class _Link(object):
    # Doubly-linked-list node used by OrderedDict. ``__slots__`` keeps the
    # per-node footprint small; ``__weakref__`` allows nodes to be wrapped
    # by the (here stubbed) weakref proxy.
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.
    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
    # The prev links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: create the circular sentinel node.
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev
    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        # Reset the sentinel to point at itself and drop all links.
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent key).
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            # Unlink the node just after the sentinel (oldest key).
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).
        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).
        '''
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        # Unlink from the current position...
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        if last:
            # ...and relink just before the sentinel (i.e. at the end).
            last = root.prev
            link.prev = last
            link.next = root
            last.next = root.prev = link
        else:
            # ...and relink just after the sentinel (i.e. at the front).
            first = root.next
            link.prev = root
            link.next = first
            root.next = first.prev = link
    def __sizeof__(self):
        # Estimate total memory: instance dict, key->link map, link nodes,
        # and the proxies chaining them together.
        sizeof = _sys.getsizeof
        n = len(self) + 1 # number of links including root
        size = sizeof(self.__dict__) # instance dictionary
        size += sizeof(self.__map) * 2 # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n # link objects
        size += sizeof(self.__root) * n # proxy objects
        return size
    #fixme brython.. Issue with _abcoll, which contains MutableMapping
    # Borrow the generic mixin implementations so that update/keys/values/
    # items all iterate via this class's order-aware __iter__.
    update = __update = MutableMapping.update
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    __ne__ = MutableMapping.__ne__
    # Sentinel distinguishing "no default supplied" from default=None.
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value. If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    #fixme, brython issue
    #@_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the internal bookkeeping attributes from the pickled state.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
#try: # Load C helper function if available
# from _collections import _count_elements
#except ImportError:
# pass
class Counter(dict):
    '''Dict subclass for counting hashable items. Sometimes called a bag
    or multiset. Elements are stored as dictionary keys and their counts
    are stored as dictionary values.
    >>> c = Counter('abcdeabcdabcaba') # count elements from a string
    >>> c.most_common(3) # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c) # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements())) # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values()) # total of all counts
    15
    >>> c['a'] # count of letter 'a'
    5
    >>> for elem in 'shazam': # update counts from an iterable
    ... c[elem] += 1 # by adding 1 to each element's count
    >>> c['a'] # now there are seven 'a'
    7
    >>> del c['b'] # remove all 'b'
    >>> c['b'] # now there are zero 'b'
    0
    >>> d = Counter('simsalabim') # make another counter
    >>> c.update(d) # add in the second counter
    >>> c['a'] # now there are nine 'a'
    9
    >>> c.clear() # empty the counter
    >>> c
    Counter()
    Note: If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:
    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2 # reduce the count of 'b' by two
    >>> c.most_common() # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]
    '''
    # References:
    # http://en.wikipedia.org/wiki/Multiset
    # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    # http://code.activestate.com/recipes/259174/
    # Knuth, TAOCP Vol. II section 4.6.3
    def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object. And if given, count elements
        from an input iterable. Or, initialize the count from another mapping
        of elements to their counts.
        >>> c = Counter() # a new, empty counter
        >>> c = Counter('gallahad') # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
        >>> c = Counter(a=4, b=2) # a new counter from keyword args
        '''
        #super().__init__() #BE modified since super not supported
        dict.__init__(self)
        self.update(iterable, **kwds)
    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0
    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least. If n is None, then list all element counts.
        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]
        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.items(), key=_itemgetter(1), reverse=True)
        # heapq.nlargest selects the top n without sorting the full list.
        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
    def elements(self):
        '''Iterator over elements repeating each as many times as its count.
        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']
        # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements(): # loop over factors
        ... product *= factor # and multiply them
        >>> product
        1836
        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.
        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        return _chain.from_iterable(_starmap(_repeat, self.items()))
    # Override dict methods where necessary
    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
    def update(self, iterable=None, **kwds):
        '''Like dict.update() but add counts instead of replacing them.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.update('witch') # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d) # add elements from another counter
        >>> c['h'] # four 'h' in which, witch, and watch
        4
        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in the some of original untouched counts
        # being mixed-in with all of the other counts for a mismash that
        # doesn't have a straight-forward interpretation in most counting
        # contexts. Instead, we implement straight-addition. Both the inputs
        # and outputs are allowed to contain zero and negative counts.
        if iterable is not None:
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    for elem, count in iterable.items():
                        self[elem] = count + self_get(elem, 0)
                else:
                    super().update(iterable) # fast path when counter is empty
            else:
                _count_elements(self, iterable)
        if kwds:
            self.update(kwds)
    def subtract(self, iterable=None, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero. Both the inputs and outputs are
        allowed to contain zero and negative counts.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.subtract('witch') # subtract elements from another iterable
        >>> c.subtract(Counter('watch')) # subtract elements from another counter
        >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
        -1
        '''
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)
    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)
    def __reduce__(self):
        # Pickle as (class, (plain-dict,)) so unpickling rebuilds the counts.
        return self.__class__, (dict(self),)
    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            super().__delitem__(elem)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        try:
            items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
            return '%s({%s})' % (self.__class__.__name__, items)
        except TypeError:
            # handle case where values are not orderable
            return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
    # Multiset-style mathematical operations discussed in:
    # Knuth TAOCP Volume II section 4.6.3 exercise 19
    # and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    # c += Counter()
    def __add__(self, other):
        '''Add counts from two counters.
        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        # Include keys that only appear in the other counter.
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.
        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        # Keys only in other contribute when their count is negative there.
        for elem, count in other.items():
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result
    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.
        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        # Include keys that only appear in the other counter.
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.
        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        # Only keys present in self can have a positive minimum.
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result
########################################################################
### ChainMap (helper for configparser)
########################################################################
class ChainMap(MutableMapping):
    ''' A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.
    The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.
    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates, and deletions only operate on the first
    mapping.
    '''
    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.
        '''
        self.maps = list(maps) or [{}] # always at least one map
    def __missing__(self, key):
        raise KeyError(key)
    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key] # can't use 'key in mapping' with defaultdict
            except KeyError:
                pass
        return self.__missing__(key) # support subclasses that define __missing__
    def get(self, key, default=None):
        return self[key] if key in self else default
    def __len__(self):
        return len(set().union(*self.maps)) # reuses stored hash values if possible
    def __iter__(self):
        return iter(set().union(*self.maps))
    def __contains__(self, key):
        return any(key in m for m in self.maps)
    def __bool__(self):
        return any(self.maps)
    # NOTE: this file previously defined __repr__ twice; the first
    # (CPython-style) definition was dead code silently overridden by this
    # one, so only the effective definition is kept.
    def __repr__(self):
        return ','.join(str(_map) for _map in self.maps)
    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))
    def copy(self):
        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
        return self.__class__(self.maps[0].copy(), *self.maps[1:])
    __copy__ = copy
    def new_child(self, m=None): # like Django's Context.push()
        '''New ChainMap with a new map followed by all previous maps.
        If no map is provided, an empty dict is used (backward compatible
        with the previous zero-argument signature).
        '''
        if m is None:
            m = {}
        return self.__class__(m, *self.maps)
    @property
    def parents(self): # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])
    def __setitem__(self, key, value):
        self.maps[0][key] = value
    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
    def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')
    def pop(self, key, *args):
        'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            #raise KeyError('Key not found in the first mapping: {!r}'.format(key))
            raise KeyError('Key not found in the first mapping: %s' % key)
    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    """A user-defined mapping wrapper around a plain dict held in ``self.data``."""

    def __init__(self, dict=None, **kwargs):
        """Create the wrapper, optionally seeding it from *dict* and *kwargs*."""
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Defer to a subclass-provided __missing__ hook, mirroring dict.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)

    def __setitem__(self, key, item):
        self.data[key] = item

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    def __contains__(self, key):
        # Consult the underlying dict directly so __missing__ has no effect.
        return key in self.data

    def __repr__(self):
        return repr(self.data)

    def copy(self):
        """Return a shallow copy, preserving the (sub)class."""
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        saved = self.data
        try:
            # Temporarily detach the payload so copy.copy() does not
            # duplicate it; the clone is re-populated afterwards.
            self.data = {}
            clone = copy.copy(self)
        finally:
            self.data = saved
        clone.update(self)
        return clone

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Build a new instance mapping each key of *iterable* to *value*."""
        fresh = cls()
        for key in iterable:
            fresh[key] = value
        return fresh
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
    """A more or less complete user-defined wrapper around list objects.

    The wrapped list is exposed as ``self.data``.
    """

    def __init__(self, initlist=None):
        self.data = []
        if initlist is None:
            return
        # XXX should this accept an arbitrary sequence?
        if type(initlist) == type(self.data):
            self.data[:] = initlist
        elif isinstance(initlist, UserList):
            self.data[:] = initlist.data[:]
        else:
            self.data = list(initlist)

    def __cast(self, other):
        # Compare against the wrapped list when the operand is a UserList.
        if isinstance(other, UserList):
            return other.data
        return other

    def __repr__(self):
        return repr(self.data)

    def __lt__(self, other):
        return self.data < self.__cast(other)

    def __le__(self, other):
        return self.data <= self.__cast(other)

    def __eq__(self, other):
        return self.data == self.__cast(other)

    def __ne__(self, other):
        return self.data != self.__cast(other)

    def __gt__(self, other):
        return self.data > self.__cast(other)

    def __ge__(self, other):
        return self.data >= self.__cast(other)

    def __contains__(self, item):
        return item in self.data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def __setitem__(self, i, item):
        self.data[i] = item

    def __delitem__(self, i):
        del self.data[i]

    def __add__(self, other):
        if isinstance(other, UserList):
            extra = other.data
        elif isinstance(other, type(self.data)):
            extra = other
        else:
            extra = list(other)
        return self.__class__(self.data + extra)

    def __radd__(self, other):
        if isinstance(other, UserList):
            prefix = other.data
        elif isinstance(other, type(self.data)):
            prefix = other
        else:
            prefix = list(other)
        return self.__class__(prefix + self.data)

    def __iadd__(self, other):
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self

    def __mul__(self, n):
        return self.__class__(self.data * n)

    __rmul__ = __mul__

    def __imul__(self, n):
        self.data *= n
        return self

    def append(self, item):
        self.data.append(item)

    def insert(self, i, item):
        self.data.insert(i, item)

    def pop(self, i=-1):
        return self.data.pop(i)

    def remove(self, item):
        self.data.remove(item)

    def clear(self):
        self.data.clear()

    def copy(self):
        return self.__class__(self)

    def count(self, item):
        return self.data.count(item)

    def index(self, item, *args):
        return self.data.index(item, *args)

    def reverse(self):
        self.data.reverse()

    def sort(self, *args, **kwds):
        self.data.sort(*args, **kwds)

    def extend(self, other):
        source = other.data if isinstance(other, UserList) else other
        self.data.extend(source)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
    """A str work-alike that stores its value in the ``data`` attribute.

    Most methods delegate to ``self.data`` and, where the result is itself
    a string, wrap it back in ``self.__class__`` so subclasses round-trip.
    Methods whose results are not single strings (``join``, ``partition``,
    ``split``, ``count``, ``find``, the ``is*`` predicates, ...) return the
    plain built-in result instead.
    """
    def __init__(self, seq):
        # Accept a str, another UserString, or anything convertible via str().
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)
    # Comparisons unwrap a UserString operand, then defer to str comparison.
    def __eq__(self, string):
        if isinstance(string, UserString):
            return self.data == string.data
        return self.data == string
    def __ne__(self, string):
        if isinstance(string, UserString):
            return self.data != string.data
        return self.data != string
    def __lt__(self, string):
        if isinstance(string, UserString):
            return self.data < string.data
        return self.data < string
    def __le__(self, string):
        if isinstance(string, UserString):
            return self.data <= string.data
        return self.data <= string
    def __gt__(self, string):
        if isinstance(string, UserString):
            return self.data > string.data
        return self.data > string
    def __ge__(self, string):
        if isinstance(string, UserString):
            return self.data >= string.data
        return self.data >= string
    def __contains__(self, char):
        if isinstance(char, UserString):
            char = char.data
        return char in self.data
    def __len__(self): return len(self.data)
    # Indexing and slicing both come through here; both re-wrap the result.
    def __getitem__(self, index): return self.__class__(self.data[index])
    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        return self.__class__(self.data + str(other))
    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        return self.__class__(str(other) + self.data)
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __mod__(self, args):
        # %-formatting with self as the format string.
        return self.__class__(self.data % args)
    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.count(sub, start, end)
    def encode(self, encoding=None, errors=None): # XXX improve this?
        # NOTE(review): wraps the *bytes* result back in self.__class__,
        # which str()-ifies it — presumably for historical compatibility.
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            return self.__class__(self.data.encode(encoding))
        return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=_sys.maxsize):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.find(sub, start, end)
    def format(self, *args, **kwds):
        return self.data.format(*args, **kwds)
    def index(self, sub, start=0, end=_sys.maxsize):
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def isidentifier(self): return self.data.isidentifier()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    def partition(self, sep):
        return self.data.partition(sep)
    def replace(self, old, new, maxsplit=-1):
        # NOTE: despite the name, the third parameter is a replacement
        # *count* (str.replace's third argument), not a split limit.
        if isinstance(old, UserString):
            old = old.data
        if isinstance(new, UserString):
            new = new.data
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=_sys.maxsize):
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None):
        return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=False): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=_sys.maxsize):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
| {
"repo_name": "lidiamcfreitas/FenixScheduleMaker",
"path": "ScheduleMaker/brython/www/src/Lib/collections/__init__.py",
"copies": "12",
"size": "34896",
"license": "bsd-2-clause",
"hash": -1382751781762709000,
"line_mean": 36.4420600858,
"line_max": 97,
"alpha_frac": 0.5541036222,
"autogenerated": false,
"ratio": 4.138519924098672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['deque', 'defaultdict', 'namedtuple']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False):
    """Returns a new subclass of tuple with named fields.

    NOTE: this is the Python 2 implementation (``basestring``, ``print``
    statement, ``exec ... in`` statement); it will not run under Python 3.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__ # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22) # instantiate with positional args or keywords
    >>> p[0] + p[1] # indexable like a plain tuple
    33
    >>> x, y = p # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y # fields also accessable by name
    33
    >>> d = p._asdict() # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d) # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Parse and validate the field names. Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, basestring):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        # Underscored names are reserved for the generated helper methods.
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)
    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return _tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    # One property per field, each reading its tuple slot via itemgetter.
    for i, name in enumerate(field_names):
        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
    if verbose:
        print template
    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec template in namespace
    except SyntaxError, e:
        raise SyntaxError(e.message + ':\n' + template)
    result = namespace[typename]
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    return result
if __name__ == '__main__':
    # Smoke tests / usage demos for namedtuple.  Python 2 only (``print``
    # statement, ``cPickle`` module).
    # verify that instances can be pickled
    from cPickle import loads, dumps
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))
    # test and demonstrate ability to override methods
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()
        @property
        def hypot(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
    for p in Point(3, 4), Point(14, 5/7.):
        print p
    # Demonstrate slimming the generated class by replacing helpers.
    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))
    print Point(11, 22)._replace(x=100)
    # Derived named tuple: extend an existing _fields tuple.
    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print Point3D.__doc__
    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print TestResults(*doctest.testmod())
| {
"repo_name": "DecipherOne/Troglodyte",
"path": "Trog Build Dependencies/Python26/Lib/collections.py",
"copies": "6",
"size": "6150",
"license": "mit",
"hash": 5728178793281582000,
"line_mean": 40.2751677852,
"line_max": 126,
"alpha_frac": 0.5830894309,
"autogenerated": false,
"ratio": 3.8948701709943,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7477959601894301,
"avg_score": null,
"num_lines": null
} |
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict', 'ChainMap']
# For backwards compatibility, continue to make the collections ABCs
# available through the collections module.
from _collections_abc import *
import _collections_abc
__all__ += _collections_abc.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
################################################################################
### OrderedDict
################################################################################
class _Link(object):
    # A node of OrderedDict's circular doubly linked list.  The __weakref__
    # slot lets the root sentinel be wrapped in a weakref proxy.
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.
    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
    # The prev links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: build the sentinel and the empty key map.
            # (The try/except guards against re-running __init__ on an
            # already-constructed instance, e.g. via pickling.)
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev
    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        # Reset the sentinel to point at itself, drop all links and all items.
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).
        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).
        '''
        # Unlink the node, then splice it back in next to the sentinel.
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            last.next = root.prev = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            root.next = first.prev = link
    def __sizeof__(self):
        # Approximate total size: instance dict, key map (counted twice to
        # cover the inherited dict too), plus one link and one proxy per key.
        sizeof = _sys.getsizeof
        n = len(self) + 1 # number of links including root
        size = sizeof(self.__dict__) # instance dictionary
        size += sizeof(self.__map) * 2 # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n # link objects
        size += sizeof(self.__root) * n # proxy objects
        return size
    # Bulk operations and views come from MutableMapping; __update is the
    # private (name-mangled) alias used by __init__ so that a subclass
    # overriding update() cannot break construction.
    update = __update = MutableMapping.update
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    __ne__ = MutableMapping.__ne__
    # Sentinel distinguishing "no default given" from an explicit None.
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    @_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
    def __reduce__(self):
        'Return state information for pickling'
        # Strip the internal bookkeeping attributes; order is rebuilt from
        # the items iterator on unpickling.
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        return self.__class__, (), inst_dict or None, None, iter(self.items())
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return dict.__eq__(self, other) and all(map(_eq, self, other))
        return dict.__eq__(self, other)
################################################################################
### namedtuple
################################################################################
_class_template = """\
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
@property
def __dict__(self):
'A new OrderedDict mapping field names to their values'
return OrderedDict(zip(self._fields, self))
def _asdict(self):
'Return a new OrderedDict which maps field names to their values.'
return self.__dict__
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
def __getstate__(self):
'Exclude the OrderedDict from pickling'
return None
{field_defs}
"""
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.
    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__ # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22) # instantiate with positional args or keywords
    >>> p[0] + p[1] # indexable like a plain tuple
    33
    >>> x, y = p # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y # fields also accessable by name
    33
    >>> d = p._asdict() # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d) # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Validate the field names.  At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    typename = str(typename)
    if rename:
        # Replace invalid, keyword, underscored, or duplicate names with a
        # positional placeholder '_0', '_1', ...
        seen = set()
        for index, name in enumerate(field_names):
            if (not name.isidentifier()
                or _iskeyword(name)
                or name.startswith('_')
                or name in seen):
                field_names[index] = '_%d' % index
            seen.add(name)
    for name in [typename] + field_names:
        if type(name) != str:
            raise TypeError('Type names and field names must be strings')
        if not name.isidentifier():
            raise ValueError('Type names and field names must be valid '
                             'identifiers: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             'keyword: %r' % name)
    seen = set()
    for name in field_names:
        # Underscored names are reserved for the generated helper methods.
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             '%r' % name)
        if name in seen:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen.add(name)
    # Fill-in the class template
    class_definition = _class_template.format(
        typename = typename,
        field_names = tuple(field_names),
        num_fields = len(field_names),
        arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
        repr_fmt = ', '.join(_repr_template.format(name=name)
                             for name in field_names),
        field_defs = '\n'.join(_field_template.format(index=index, name=name)
                               for index, name in enumerate(field_names))
    )
    # Execute the template string in a temporary namespace and support
    # tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(__name__='namedtuple_%s' % typename)
    exec(class_definition, namespace)
    result = namespace[typename]
    result._source = class_definition
    if verbose:
        print(result._source)
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super().__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior results in the some of original untouched counts
# being mixed-in with all of the other counts for a mismash that
# doesn't have a straight-forward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super().update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
    def __pos__(self):
        'Adds an empty counter, effectively stripping negative and zero counts'
        # Delegates to __add__, whose result only keeps counts > 0.
        return self + Counter()
    def __neg__(self):
        '''Subtracts from an empty counter. Strips positive and zero counts,
        and flips the sign on negative counts.
        '''
        # Delegates to __sub__, whose result only keeps counts > 0.
        return Counter() - self
def _keep_positive(self):
'''Internal method to strip elements with a negative or zero count'''
nonpositive = [elem for elem, count in self.items() if not count > 0]
for elem in nonpositive:
del self[elem]
return self
def __iadd__(self, other):
'''Inplace add from another counter, keeping only positive counts.
>>> c = Counter('abbb')
>>> c += Counter('bcc')
>>> c
Counter({'b': 4, 'c': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] += count
return self._keep_positive()
def __isub__(self, other):
'''Inplace subtract counter, but keep only results with positive counts.
>>> c = Counter('abbbc')
>>> c -= Counter('bccd')
>>> c
Counter({'b': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] -= count
return self._keep_positive()
def __ior__(self, other):
'''Inplace union is the maximum of value from either counter.
>>> c = Counter('abbb')
>>> c |= Counter('bcc')
>>> c
Counter({'b': 3, 'c': 2, 'a': 1})
'''
for elem, other_count in other.items():
count = self[elem]
if other_count > count:
self[elem] = other_count
return self._keep_positive()
def __iand__(self, other):
'''Inplace intersection is the minimum of corresponding counts.
>>> c = Counter('abbb')
>>> c &= Counter('bcc')
>>> c
Counter({'b': 1})
'''
for elem, count in self.items():
other_count = other[elem]
if other_count < count:
self[elem] = other_count
return self._keep_positive()
########################################################################
### ChainMap (helper for configparser and string.Template)
########################################################################
class ChainMap(MutableMapping):
    """A single, updateable view over several mappings.

    The underlying mappings live in the public ``maps`` list; there is no
    other state.  Lookups try each mapping in order until a key is found,
    while writes, updates, and deletions touch only ``maps[0]``.
    """
    def __init__(self, *maps):
        """Store *maps*; with no arguments a single empty dict is used."""
        self.maps = list(maps) if maps else [{}]
    def __missing__(self, key):
        # Hook for subclasses; the default miss behavior is a KeyError.
        raise KeyError(key)
    def __getitem__(self, key):
        for layer in self.maps:
            try:
                # Index directly: 'key in layer' would defeat defaultdict.
                return layer[key]
            except KeyError:
                continue
        return self.__missing__(key)
    def get(self, key, default=None):
        """Return self[key] when present in any layer, else *default*."""
        if key in self:
            return self[key]
        return default
    def __len__(self):
        # The length is the number of distinct keys across all layers.
        return len(set().union(*self.maps))
    def __iter__(self):
        return iter(set().union(*self.maps))
    def __contains__(self, key):
        for layer in self.maps:
            if key in layer:
                return True
        return False
    def __bool__(self):
        for layer in self.maps:
            if layer:
                return True
        return False
    @_recursive_repr()
    def __repr__(self):
        inner = ', '.join(repr(layer) for layer in self.maps)
        return '{0.__class__.__name__}({1})'.format(self, inner)
    @classmethod
    def fromkeys(cls, iterable, *args):
        """Create a ChainMap with a single dict created from the iterable."""
        return cls(dict.fromkeys(iterable, *args))
    def copy(self):
        """New ChainMap/subclass with a copy of maps[0] and refs to the rest."""
        first_clone = self.maps[0].copy()
        return self.__class__(first_clone, *self.maps[1:])
    __copy__ = copy
    def new_child(self, m=None):
        """New ChainMap with a fresh map (default empty dict) in front."""
        return self.__class__({} if m is None else m, *self.maps)
    @property
    def parents(self):
        """New ChainMap built from maps[1:] (like Django's Context.pop())."""
        return self.__class__(*self.maps[1:])
    def __setitem__(self, key, value):
        self.maps[0][key] = value
    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
    def popitem(self):
        """Remove and return a (key, value) pair from maps[0]."""
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')
    def pop(self, key, *args):
        """Remove *key* from maps[0] and return its value."""
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
    def clear(self):
        """Clear maps[0]; the remaining layers are left untouched."""
        self.maps[0].clear()
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    """Dict-like wrapper class that stores its contents in ``self.data``."""
    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if kwargs:
            self.update(kwargs)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, key):
        data = self.data
        if key in data:
            return data[key]
        # Defer to a subclass-provided __missing__ hook, mirroring dict.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def __setitem__(self, key, item):
        self.data[key] = item
    def __delitem__(self, key):
        del self.data[key]
    def __iter__(self):
        return iter(self.data)
    def __contains__(self, key):
        # Check the real dict so __missing__ never makes membership lie.
        return key in self.data
    def __repr__(self):
        return repr(self.data)
    def copy(self):
        """Shallow copy; subclasses are copied via copy.copy()."""
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        # Temporarily detach data so copy.copy() does not duplicate it too.
        original = self.data
        try:
            self.data = {}
            duplicate = copy.copy(self)
        finally:
            self.data = original
        duplicate.update(self)
        return duplicate
    @classmethod
    def fromkeys(cls, iterable, value=None):
        """New instance with keys from *iterable*, each mapped to *value*."""
        fresh = cls()
        for key in iterable:
            fresh[key] = value
        return fresh
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
    """List-like wrapper class that stores its contents in ``self.data``."""
    def __init__(self, initlist=None):
        self.data = []
        if initlist is None:
            return
        # Accepts a plain list, another UserList, or any other iterable.
        if type(initlist) == type(self.data):
            self.data[:] = initlist
        elif isinstance(initlist, UserList):
            self.data[:] = initlist.data[:]
        else:
            self.data = list(initlist)
    def __repr__(self):
        return repr(self.data)
    def __lt__(self, other):
        return self.data < self.__unwrap(other)
    def __le__(self, other):
        return self.data <= self.__unwrap(other)
    def __eq__(self, other):
        return self.data == self.__unwrap(other)
    def __ne__(self, other):
        return self.data != self.__unwrap(other)
    def __gt__(self, other):
        return self.data > self.__unwrap(other)
    def __ge__(self, other):
        return self.data >= self.__unwrap(other)
    def __unwrap(self, other):
        # Compare against the inner list when the operand is a UserList.
        if isinstance(other, UserList):
            return other.data
        return other
    def __contains__(self, item):
        return item in self.data
    def __len__(self):
        return len(self.data)
    def __getitem__(self, i):
        return self.data[i]
    def __setitem__(self, i, item):
        self.data[i] = item
    def __delitem__(self, i):
        del self.data[i]
    def __add__(self, other):
        if isinstance(other, UserList):
            other = other.data
        elif not isinstance(other, type(self.data)):
            other = list(other)
        return self.__class__(self.data + other)
    def __radd__(self, other):
        if isinstance(other, UserList):
            other = other.data
        elif not isinstance(other, type(self.data)):
            other = list(other)
        return self.__class__(other + self.data)
    def __iadd__(self, other):
        if isinstance(other, UserList):
            other = other.data
        elif not isinstance(other, type(self.data)):
            other = list(other)
        self.data += other
        return self
    def __mul__(self, n):
        return self.__class__(self.data * n)
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        return self
    def append(self, item):
        self.data.append(item)
    def insert(self, i, item):
        self.data.insert(i, item)
    def pop(self, i=-1):
        return self.data.pop(i)
    def remove(self, item):
        self.data.remove(item)
    def clear(self):
        self.data.clear()
    def copy(self):
        return self.__class__(self)
    def count(self, item):
        return self.data.count(item)
    def index(self, item, *args):
        return self.data.index(item, *args)
    def reverse(self):
        self.data.reverse()
    def sort(self, *args, **kwds):
        self.data.sort(*args, **kwds)
    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
    # Wraps a str in self.data for easier subclassing.  Methods whose str
    # counterpart returns a string re-wrap the result in self.__class__ so
    # subclasses keep their own type; predicate/search methods return the
    # raw bool/int unchanged.
    def __init__(self, seq):
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            # Fall back to str() for arbitrary objects.
            self.data = str(seq)
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)
    # Comparisons unwrap a UserString operand and otherwise compare the
    # inner str directly against the operand.
    def __eq__(self, string):
        if isinstance(string, UserString):
            return self.data == string.data
        return self.data == string
    def __ne__(self, string):
        if isinstance(string, UserString):
            return self.data != string.data
        return self.data != string
    def __lt__(self, string):
        if isinstance(string, UserString):
            return self.data < string.data
        return self.data < string
    def __le__(self, string):
        if isinstance(string, UserString):
            return self.data <= string.data
        return self.data <= string
    def __gt__(self, string):
        if isinstance(string, UserString):
            return self.data > string.data
        return self.data > string
    def __ge__(self, string):
        if isinstance(string, UserString):
            return self.data >= string.data
        return self.data >= string
    def __contains__(self, char):
        if isinstance(char, UserString):
            char = char.data
        return char in self.data
    def __len__(self): return len(self.data)
    # Indexing and slicing both return a wrapped instance, not a plain str.
    def __getitem__(self, index): return self.__class__(self.data[index])
    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        return self.__class__(self.data + str(other))
    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        return self.__class__(str(other) + self.data)
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __mod__(self, args):
        return self.__class__(self.data % args)
    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.count(sub, start, end)
    def encode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            return self.__class__(self.data.encode(encoding))
        return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=_sys.maxsize):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.find(sub, start, end)
    def format(self, *args, **kwds):
        return self.data.format(*args, **kwds)
    def index(self, sub, start=0, end=_sys.maxsize):
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def isidentifier(self): return self.data.isidentifier()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    def partition(self, sep):
        return self.data.partition(sep)
    # NOTE(review): the parameter is named maxsplit but is passed to
    # str.replace() as its *count* argument — confirm before renaming.
    def replace(self, old, new, maxsplit=-1):
        if isinstance(old, UserString):
            old = old.data
        if isinstance(new, UserString):
            new = new.data
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=_sys.maxsize):
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None):
        return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=False): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=_sys.maxsize):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
| {
"repo_name": "ForgottenKahz/CloudOPC",
"path": "venv/Lib/collections/__init__.py",
"copies": "68",
"size": "41964",
"license": "mit",
"hash": -304196575469146900,
"line_mean": 36.0706713781,
"line_max": 99,
"alpha_frac": 0.5507339624,
"autogenerated": false,
"ratio": 4.176769184831293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0028158424935218014,
"num_lines": 1132
} |
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
################################################################################
### OrderedDict
################################################################################
class _Link(object):
    # Node of OrderedDict's circular doubly linked list.  The __weakref__
    # slot lets links be wrapped in weakref.proxy objects so the prev/next
    # chain does not create reference cycles.
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is stored in self.__hardroot with a weakref proxy in self.__root.
    # The prev/next links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Re-initializing an existing instance must not rebuild the list.
            self.__root
        except AttributeError:
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev
    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).
        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).
        '''
        # Unlink the node, then re-splice it next to the sentinel root.
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            last.next = root.prev = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            root.next = first.prev = link
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        # The linked-list attributes cannot be pickled; strip them and
        # restore them after capturing the remaining instance dict.
        tmp = self.__map, self.__root, self.__hardroot
        del self.__map, self.__root, self.__hardroot
        inst_dict = vars(self).copy()
        self.__map, self.__root, self.__hardroot = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def __sizeof__(self):
        sizeof = _sys.getsizeof
        n = len(self) + 1                       # number of links including root
        size = sizeof(self.__dict__)            # instance dictionary
        size += sizeof(self.__map) * 2          # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n     # link objects
        size += sizeof(self.__root) * n         # proxy objects
        return size
    # Bulk operations reuse the MutableMapping mixins; __update is the
    # private alias used by __init__.
    update = __update = MutableMapping.update
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    __ne__ = MutableMapping.__ne__
    # Sentinel so pop() can distinguish "no default given" from default=None.
    __marker = object()
    def pop(self, key, default=__marker):
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'OD.setdefault(k[,d]) -> OD.get(k,d), also set OD[k]=d if k not in OD'
        if key in self:
            return self[key]
        self[key] = default
        return default
    @_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
################################################################################
### namedtuple
################################################################################
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.
    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessable by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Parse and validate the field names.  Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    # With rename=True, invalid or duplicate field names are silently
    # replaced with positional names ('_0', '_1', ...).
    if rename:
        names = list(field_names)
        seen = set()
        for i, name in enumerate(names):
            if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
                or not name or name[0].isdigit() or name.startswith('_')
                or name in seen):
                names[i] = '_%d' % i
            seen.add(name)
        field_names = tuple(names)
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)
    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1]   # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            'Create new instance of %(typename)s(%(argtxt)s)'
            return _tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            'Return a nicely formatted representation string'
            return self.__class__.__name__ + '(%(reprtxt)s)' %% self \n
        def _asdict(self):
            'Return a new OrderedDict which maps field names to their values'
            return OrderedDict(zip(self._fields, self)) \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            'Return self as a plain tuple.  Used by copy and pickle.'
            return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        template += "        %s = _property(_itemgetter(%d), doc='Alias for field number %d')\n" % (name, i, i)
    if verbose:
        print(template)
    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     OrderedDict=OrderedDict, _property=property, _tuple=tuple)
    try:
        exec(template, namespace)
    except SyntaxError as e:
        raise SyntaxError(e.msg + ':\n\n' + template)
    result = namespace[typename]
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
    def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object.  And if given, count elements
        from an input iterable.  Or, initialize the count from another mapping
        of elements to their counts.
        >>> c = Counter()                           # a new, empty counter
        >>> c = Counter('gallahad')                 # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2})           # a new counter from a mapping
        >>> c = Counter(a=4, b=2)                   # a new counter from keyword args
        '''
        # update() handles all three input forms (iterable/mapping/kwargs).
        super().__init__()
        self.update(iterable, **kwds)
    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        # Note: unlike defaultdict, looking up a missing key does NOT insert it.
        return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        # Deliberately disabled: inheriting dict.fromkeys would be misleading.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined.  Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior results in the some of original untouched counts
# being mixed-in with all of the other counts for a mismash that
# doesn't have a straight-forward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super().update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
    '''Like dict.update() but subtracts counts instead of replacing them.
    Counts can be reduced below zero.  Both the inputs and outputs are
    allowed to contain zero and negative counts.
    Source can be an iterable, a dictionary, or another Counter instance.
    >>> c = Counter('which')
    >>> c.subtract('witch')             # subtract elements from another iterable
    >>> c.subtract(Counter('watch'))    # subtract elements from another counter
    >>> c['h']                          # 2 in which, minus 1 in witch, minus 1 in watch
    0
    >>> c['w']                          # 1 in which, minus 1 in witch, minus 1 in watch
    -1
    '''
    if iterable is not None:
        lookup = self.get
        if isinstance(iterable, Mapping):
            # Mapping source: subtract each stored count.
            for elem, count in iterable.items():
                self[elem] = lookup(elem, 0) - count
        else:
            # Plain iterable: every occurrence removes one.
            for elem in iterable:
                self[elem] = lookup(elem, 0) - 1
    if kwds:
        self.subtract(kwds)
def copy(self):
    'Like dict.copy() but returns a Counter instance instead of a dict.'
    # Rebuild through the Counter constructor so the result keeps the
    # Counter API instead of degrading to a plain dict.
    duplicate = Counter(self)
    return duplicate
def __reduce__(self):
    # Pickle protocol: reconstruct by calling the class with a plain dict
    # holding the current counts.
    state = dict(self)
    return (self.__class__, (state,))
def __delitem__(self, elem):
    'Like dict.__delitem__() but does not raise KeyError for missing values.'
    # Deleting an absent element is a harmless no-op.
    if elem not in self:
        return
    super().__delitem__(elem)
def __repr__(self):
    # Empty counters render as 'Counter()'.
    if not self:
        return '%s()' % self.__class__.__name__
    # Otherwise show entries ordered from most common to least common.
    body = ', '.join('%r: %r' % entry for entry in self.most_common())
    return '%s({%s})' % (self.__class__.__name__, body)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
    '''Add counts from two counters.
    >>> Counter('abbb') + Counter('bcc')
    Counter({'b': 4, 'c': 2, 'a': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    # Walk the union of keys; only strictly positive totals survive.
    result = Counter()
    for elem in set(self).union(other):
        total = self[elem] + other[elem]
        if total <= 0:
            continue
        result[elem] = total
    return result
def __sub__(self, other):
    ''' Subtract count, but keep only results with positive counts.
    >>> Counter('abbbc') - Counter('bccd')
    Counter({'b': 2, 'a': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    # Walk the union of keys; drop zero and negative differences.
    result = Counter()
    for elem in set(self).union(other):
        diff = self[elem] - other[elem]
        if diff <= 0:
            continue
        result[elem] = diff
    return result
def __or__(self, other):
    '''Union is the maximum of value in either of the input counters.
    >>> Counter('abbb') | Counter('bcc')
    Counter({'b': 3, 'c': 2, 'a': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    # Take the larger of the two counts for every key; keep positives only.
    result = Counter()
    for elem in set(self).union(other):
        biggest = max(self[elem], other[elem])
        if biggest > 0:
            result[elem] = biggest
    return result
def __and__(self, other):
    ''' Intersection is the minimum of corresponding counts.
    >>> Counter('abbb') & Counter('bcc')
    Counter({'b': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    result = Counter()
    # Iterate the smaller mapping and probe the larger one for membership.
    if len(self) < len(other):
        self, other = other, self
    for elem in other:
        if elem not in self:
            continue
        smallest = min(self[elem], other[elem])
        if smallest > 0:
            result[elem] = smallest
    return result
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    """A dict work-alike that keeps its payload in a real dict at ``self.data``.

    Designed for subclassing: overridden methods are honored consistently,
    unlike subclassing ``dict`` directly.
    """

    # --- abstract methods required by MutableMapping ---

    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Honor a subclass-defined __missing__ hook, mirroring dict.
        missing = getattr(self.__class__, "__missing__", None)
        if missing is not None:
            return missing(self, key)
        raise KeyError(key)

    def __setitem__(self, key, item):
        self.data[key] = item

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    # __contains__ consults the payload directly so that __missing__
    # does not make every key look present.
    def __contains__(self, key):
        return key in self.data

    # --- dict extras not provided by MutableMapping ---

    def __repr__(self):
        return repr(self.data)

    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        saved = self.data
        try:
            # Detach the payload so copy.copy() does not duplicate it; the
            # entries are re-added through update() below.
            self.data = {}
            duplicate = copy.copy(self)
        finally:
            self.data = saved
        duplicate.update(self)
        return duplicate

    @classmethod
    def fromkeys(cls, iterable, value=None):
        fresh = cls()
        for key in iterable:
            fresh[key] = value
        return fresh
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
    """A more or less complete user-defined wrapper around list objects."""

    def __init__(self, initlist=None):
        self.data = []
        if initlist is None:
            return
        # XXX should this accept an arbitrary sequence?
        if type(initlist) == type(self.data):
            self.data[:] = initlist
        elif isinstance(initlist, UserList):
            self.data[:] = initlist.data[:]
        else:
            self.data = list(initlist)

    def __repr__(self):
        return repr(self.data)

    # Comparisons defer to the wrapped list, unwrapping UserList operands.
    def __lt__(self, other):
        return self.data < self.__cast(other)

    def __le__(self, other):
        return self.data <= self.__cast(other)

    def __eq__(self, other):
        return self.data == self.__cast(other)

    def __ne__(self, other):
        return self.data != self.__cast(other)

    def __gt__(self, other):
        return self.data > self.__cast(other)

    def __ge__(self, other):
        return self.data >= self.__cast(other)

    def __cast(self, other):
        return other.data if isinstance(other, UserList) else other

    def __contains__(self, item):
        return item in self.data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def __setitem__(self, i, item):
        self.data[i] = item

    def __delitem__(self, i):
        del self.data[i]

    def __add__(self, other):
        if isinstance(other, UserList):
            tail = other.data
        elif isinstance(other, type(self.data)):
            tail = other
        else:
            tail = list(other)
        return self.__class__(self.data + tail)

    def __radd__(self, other):
        if isinstance(other, UserList):
            head = other.data
        elif isinstance(other, type(self.data)):
            head = other
        else:
            head = list(other)
        return self.__class__(head + self.data)

    def __iadd__(self, other):
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self

    def __mul__(self, n):
        return self.__class__(self.data * n)

    __rmul__ = __mul__

    def __imul__(self, n):
        self.data *= n
        return self

    # The mutating list API simply forwards to the wrapped list.
    def append(self, item):
        self.data.append(item)

    def insert(self, i, item):
        self.data.insert(i, item)

    def pop(self, i=-1):
        return self.data.pop(i)

    def remove(self, item):
        self.data.remove(item)

    def count(self, item):
        return self.data.count(item)

    def index(self, item, *args):
        return self.data.index(item, *args)

    def reverse(self):
        self.data.reverse()

    def sort(self, *args, **kwds):
        self.data.sort(*args, **kwds)

    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
    """A str work-alike that keeps its content in a real str at ``self.data``.

    Designed for subclassing when overriding string behavior must be
    reliable.  Most methods delegate to the underlying str; methods that
    return strings wrap the result back into the (sub)class.

    Fix over the original: index() and rindex() now unwrap UserString
    arguments, consistent with count(), find() and rfind() (previously a
    UserString argument raised TypeError).
    """

    def __init__(self, seq):
        # Accept a str, another UserString, or anything convertible via str().
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)

    # Conversions and hashing delegate to the underlying string.
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)

    # Rich comparisons accept both UserString and plain operands.
    def __eq__(self, string):
        if isinstance(string, UserString):
            return self.data == string.data
        return self.data == string
    def __ne__(self, string):
        if isinstance(string, UserString):
            return self.data != string.data
        return self.data != string
    def __lt__(self, string):
        if isinstance(string, UserString):
            return self.data < string.data
        return self.data < string
    def __le__(self, string):
        if isinstance(string, UserString):
            return self.data <= string.data
        return self.data <= string
    def __gt__(self, string):
        if isinstance(string, UserString):
            return self.data > string.data
        return self.data > string
    def __ge__(self, string):
        if isinstance(string, UserString):
            return self.data >= string.data
        return self.data >= string

    def __contains__(self, char):
        if isinstance(char, UserString):
            char = char.data
        return char in self.data

    def __len__(self): return len(self.data)
    # Indexing/slicing returns an instance of the (sub)class.
    def __getitem__(self, index): return self.__class__(self.data[index])

    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        return self.__class__(self.data + str(other))
    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        return self.__class__(str(other) + self.data)
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __mod__(self, args):
        return self.__class__(self.data % args)

    # The following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.count(sub, start, end)
    def encode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            return self.__class__(self.data.encode(encoding))
        return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=_sys.maxsize):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.find(sub, start, end)
    def format(self, *args, **kwds):
        return self.data.format(*args, **kwds)
    def index(self, sub, start=0, end=_sys.maxsize):
        # Fix: unwrap UserString arguments, consistent with find().
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def isidentifier(self): return self.data.isidentifier()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    def partition(self, sep):
        return self.data.partition(sep)
    def replace(self, old, new, maxsplit=-1):
        if isinstance(old, UserString):
            old = old.data
        if isinstance(new, UserString):
            new = new.data
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=_sys.maxsize):
        # Fix: unwrap UserString arguments, consistent with rfind().
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None):
        return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=_sys.maxsize):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
################################################################################
### Simple tests
################################################################################
if __name__ == '__main__':
    # Self-test: verify that instances can be pickled.
    from pickle import loads, dumps
    # verbose=True prints the generated class source to stdout.
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))
    # Test and demonstrate the ability to override methods in a subclass.
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()  # keep instances as lightweight as the base tuple
        @property
        def hypot(self):
            # Euclidean distance of the point from the origin.
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
    for p in Point(3, 4), Point(14, 5/7.):
        print (p)
    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        # tuple.__new__ already accepts (cls, iterable), so it can serve as
        # _make directly, skipping the length check.
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))
    print(Point(11, 22)._replace(x=100))
    # _fields supports building extended versions of existing named tuples.
    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print(Point3D.__doc__)
    # Finally, run the doctests embedded in the module's docstrings.
    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print(TestResults(*doctest.testmod()))
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-3.2/Lib/collections.py",
"copies": "1",
"size": "36703",
"license": "mit",
"hash": 2539180549309281000,
"line_mean": 38.0457446809,
"line_max": 126,
"alpha_frac": 0.5493283928,
"autogenerated": false,
"ratio": 4.118841880821456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003599108572733478,
"num_lines": 940
} |
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
################################################################################
### namedtuple
################################################################################
def namedtuple(typename, field_names, verbose=False):
    """Returns a new subclass of tuple with named fields.
    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessible by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Validate the type and field names up front.  This both produces clear
    # error messages and prevents template-injection through crafted names.
    if isinstance(field_names, str):
        # Names may arrive as a single string separated by commas/whitespace.
        field_names = field_names.replace(',', ' ').split()
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c == '_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen.add(name)
    # Build the substitutions for the class-source template.
    subst = {
        'typename': typename,
        'field_names': field_names,
        'numfields': len(field_names),
        # tuple repr without parens or quotes, e.g. "x, y"
        'argtxt': repr(field_names).replace("'", "")[1:-1],
        'reprtxt': ', '.join('%s=%%r' % name for name in field_names),
        'dicttxt': ', '.join('%r: t[%d]' % (name, pos)
                             for pos, name in enumerate(field_names)),
    }
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(cls, %(argtxt)s):
            return tuple.__new__(cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = self._make(map(kwds.pop, %(field_names)r, self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % subst
    for position, name in enumerate(field_names):
        template += '        %s = property(itemgetter(%d))\n' % (name, position)
    if verbose:
        print(template)
    # Execute the template in a throwaway namespace; __name__ is set so
    # tracing utilities can label frames created by the generated code.
    namespace = dict(itemgetter=_itemgetter, __name__='namedtuple_%s' % typename)
    try:
        exec(template, namespace)
    except SyntaxError as e:
        raise SyntaxError(e.msg + ':\n' + template) from e
    result = namespace[typename]
    # For pickling to work, __module__ must point at the caller's module.
    # Skipped where sys._getframe is unavailable (e.g. Jython).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals['__name__']
    return result
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    """Mapping class that wraps a real dict (``self.data``); safe to subclass."""

    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        # EAFP: hit the payload first, then fall back to a subclass-defined
        # __missing__ hook, mirroring dict's behavior.
        try:
            return self.data[key]
        except KeyError:
            pass
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)

    def __setitem__(self, key, item):
        self.data[key] = item

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    # Consult the payload directly so __missing__ does not make every key
    # appear present.
    def __contains__(self, key):
        return key in self.data

    # Methods dicts have but MutableMapping does not supply:
    def __repr__(self):
        return repr(self.data)

    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        saved = self.data
        try:
            # Detach the payload so copy.copy() does not duplicate it,
            # then re-add the entries via update().
            self.data = {}
            duplicate = copy.copy(self)
        finally:
            self.data = saved
        duplicate.update(self)
        return duplicate

    @classmethod
    def fromkeys(cls, iterable, value=None):
        fresh = cls()
        for key in iterable:
            fresh[key] = value
        return fresh
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
    """A more or less complete user-defined wrapper around list objects.

    The payload lives in ``self.data``; most methods delegate to it.

    Fix over the original: __cmp__ called the builtin ``cmp()``, which does
    not exist in Python 3, so invoking it raised NameError.  It now computes
    the three-way result directly.  (Python 3 itself never calls __cmp__;
    the method is kept for API compatibility with older callers.)
    """

    def __init__(self, initlist=None):
        self.data = []
        if initlist is not None:
            # XXX should this accept an arbitrary sequence?
            if type(initlist) == type(self.data):
                self.data[:] = initlist
            elif isinstance(initlist, UserList):
                self.data[:] = initlist.data[:]
            else:
                self.data = list(initlist)

    def __repr__(self): return repr(self.data)

    # Comparisons defer to the wrapped list, unwrapping UserList operands.
    def __lt__(self, other): return self.data < self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __ne__(self, other): return self.data != self.__cast(other)
    def __gt__(self, other): return self.data > self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cast(self, other):
        return other.data if isinstance(other, UserList) else other
    def __cmp__(self, other):
        # Fix: Python 3 has no builtin cmp(); emulate the classic
        # three-way comparison (-1, 0, or 1).
        other = self.__cast(other)
        return (self.data > other) - (self.data < other)
    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)
    def __getitem__(self, i): return self.data[i]
    def __setitem__(self, i, item): self.data[i] = item
    def __delitem__(self, i): del self.data[i]
    def __add__(self, other):
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        return self.__class__(self.data + list(other))
    def __radd__(self, other):
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        return self.__class__(list(other) + self.data)
    def __iadd__(self, other):
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        return self

    # The mutating list API simply forwards to the wrapped list.
    def append(self, item): self.data.append(item)
    def insert(self, i, item): self.data.insert(i, item)
    def pop(self, i=-1): return self.data.pop(i)
    def remove(self, item): self.data.remove(item)
    def count(self, item): return self.data.count(item)
    def index(self, item, *args): return self.data.index(item, *args)
    def reverse(self): self.data.reverse()
    def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
    """A str work-alike that keeps its content in a real str at ``self.data``.

    Designed for subclassing when overriding string behavior must be
    reliable.  Most methods delegate to the underlying str; methods that
    return strings wrap the result back into the (sub)class.

    Fix over the original: index(), rfind() and rindex() now unwrap
    UserString arguments, consistent with count() and find() (previously a
    UserString argument raised TypeError).
    """

    def __init__(self, seq):
        # Accept a str, another UserString, or anything convertible via str().
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)

    # Conversions and hashing delegate to the underlying string.
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    # Vestigial Python 2 hook; never invoked by Python 3, kept for API
    # compatibility.
    def __long__(self): return int(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)

    # Rich comparisons accept both UserString and plain operands.
    def __eq__(self, string):
        if isinstance(string, UserString):
            return self.data == string.data
        return self.data == string
    def __ne__(self, string):
        if isinstance(string, UserString):
            return self.data != string.data
        return self.data != string
    def __lt__(self, string):
        if isinstance(string, UserString):
            return self.data < string.data
        return self.data < string
    def __le__(self, string):
        if isinstance(string, UserString):
            return self.data <= string.data
        return self.data <= string
    def __gt__(self, string):
        if isinstance(string, UserString):
            return self.data > string.data
        return self.data > string
    def __ge__(self, string):
        if isinstance(string, UserString):
            return self.data >= string.data
        return self.data >= string

    def __contains__(self, char):
        if isinstance(char, UserString):
            char = char.data
        return char in self.data

    def __len__(self): return len(self.data)
    # Indexing/slicing returns an instance of the (sub)class.
    def __getitem__(self, index): return self.__class__(self.data[index])

    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        return self.__class__(self.data + str(other))
    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        return self.__class__(str(other) + self.data)
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __mod__(self, args):
        return self.__class__(self.data % args)

    # The following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.count(sub, start, end)
    def encode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            return self.__class__(self.data.encode(encoding))
        return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=_sys.maxsize):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.find(sub, start, end)
    def format(self, *args, **kwds):
        return self.data.format(*args, **kwds)
    def index(self, sub, start=0, end=_sys.maxsize):
        # Fix: unwrap UserString arguments, consistent with find().
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def isidentifier(self): return self.data.isidentifier()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    def partition(self, sep):
        return self.data.partition(sep)
    def replace(self, old, new, maxsplit=-1):
        if isinstance(old, UserString):
            old = old.data
        if isinstance(new, UserString):
            new = new.data
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=_sys.maxsize):
        # Fix: unwrap UserString arguments, consistent with find().
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=_sys.maxsize):
        # Fix: unwrap UserString arguments, consistent with index().
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None):
        return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=_sys.maxsize):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
################################################################################
### Simple tests
################################################################################
if __name__ == '__main__':
    # Self-test: verify that instances can be pickled.
    from pickle import loads, dumps
    # verbose=True prints the generated class source to stdout.
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))
    # Test and demonstrate the ability to override methods in a subclass.
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()  # keep instances as lightweight as the base tuple
        @property
        def hypot(self):
            # Euclidean distance of the point from the origin.
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
    for p in Point(3, 4), Point(14, 5/7.):
        print (p)
    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        # tuple.__new__ already accepts (cls, iterable), so it can serve as
        # _make directly, skipping the length check.
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))
    print(Point(11, 22)._replace(x=100))
    # _fields supports building extended versions of existing named tuples.
    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print(Point3D.__doc__)
    # Finally, run the doctests embedded in the module's docstrings.
    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print(TestResults(*doctest.testmod()))
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-3.0/Lib/collections.py",
"copies": "1",
"size": "17446",
"license": "mit",
"hash": 4950037682656483000,
"line_mean": 40.1462264151,
"line_max": 126,
"alpha_frac": 0.5634529405,
"autogenerated": false,
"ratio": 3.970414201183432,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5033867141683432,
"avg_score": null,
"num_lines": null
} |
"""All descriptor modules are loaded.
.. code:: python
>>> from mordred import Calculator, descriptors
>>> descriptors.ABCIndex.__name__ # ABCIndex module
'mordred.ABCIndex'
>>> len(descriptors.all) # all descriptor modules
50
>>> calc = Calculator(descriptors) # all descriptors
"""
def _import_all_descriptors():
    """Import every descriptor module in the package and register it here.

    Side effects on this module's globals: each descriptor module becomes an
    attribute named after its file, ``__all__`` lists those names, and
    ``all`` holds the module objects themselves.
    """
    import os
    from importlib import import_module

    from .._base.descriptor import is_descriptor_class

    names = []
    modules = []
    package_dir = os.path.dirname(os.path.dirname(__file__))
    for entry in sorted(os.listdir(package_dir)):
        stem, ext = os.path.splitext(entry)
        # Only public .py modules qualify.
        if stem[:1] == "_" or ext != ".py":
            continue
        module = import_module(".." + stem, __package__)
        # Register only modules that actually define descriptor classes.
        if any(v for v in module.__dict__.values() if is_descriptor_class(v)):
            names.append(stem)
            modules.append(module)
            globals()[stem] = module

    globals()["__all__"] = tuple(names)
    globals()["all"] = tuple(modules)


_import_all_descriptors()
| {
"repo_name": "mordred-descriptor/mordred",
"path": "mordred/descriptors/__init__.py",
"copies": "1",
"size": "1044",
"license": "bsd-3-clause",
"hash": -1206776678479332900,
"line_mean": 23.8571428571,
"line_max": 75,
"alpha_frac": 0.5862068966,
"autogenerated": false,
"ratio": 3.81021897810219,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.989642587470219,
"avg_score": 0,
"num_lines": 42
} |
__all__ = ('DesignerApp', )
import kivy
import time
import os
import shutil
import traceback
kivy.require('1.4.1')
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.layout import Layout
from kivy.factory import Factory
from kivy.properties import ObjectProperty, BooleanProperty
from kivy.clock import Clock
from kivy.uix import actionbar
from kivy.garden.filebrowser import FileBrowser
from kivy.uix.popup import Popup
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelItem
from kivy.lang import Builder
from kivy.uix.carousel import Carousel
from kivy.uix.screenmanager import ScreenManager
import designer
from designer.uix.actioncheckbutton import ActionCheckButton
from designer.playground import PlaygroundDragElement
from designer.common import widgets
from designer.uix.editcontview import EditContView
from designer.uix.kv_lang_area import KVLangArea
from designer.undo_manager import WidgetOperation, UndoManager
from designer.project_loader import ProjectLoader, ProjectLoaderException
from designer.select_class import SelectClass
from designer.confirmation_dialog import ConfirmationDialog
from designer.proj_watcher import ProjectWatcher
from designer.recent_manager import RecentManager, RecentDialog
from designer.add_file import AddFileDialog
from designer.ui_creator import UICreator
from designer.designer_content import DesignerContent
from designer.uix.designer_sandbox import DesignerSandbox
from designer.project_settings import ProjectSettings
from designer.designer_settings import DesignerSettings
from designer.helper_functions import get_kivy_designer_dir
from designer.new_dialog import NewProjectDialog, NEW_PROJECTS
from designer.eventviewer import EventViewer
from designer.uix.designer_action_items import DesignerActionButton
from designer.help_dialog import HelpDialog, AboutDialog
NEW_PROJECT_DIR_NAME = 'new_proj'
NEW_TEMPLATES_DIR = 'new_templates'
class Designer(FloatLayout):
'''Designer is the Main Window class of Kivy Designer
:data:`message` is a :class:`~kivy.properties.StringProperty`
'''
designer_console = ObjectProperty(None)
'''Instance of :class:`designer.designer_console.ConsoleDialog`
'''
statusbar = ObjectProperty(None)
'''Reference to the :class:`~designer.statusbar.StatusBar` instance.
:data:`statusbar` is a :class:`~kivy.properties.ObjectProperty`
'''
editcontview = ObjectProperty(None)
'''Reference to the :class:`~designer.uix.EditContView` instance.
:data:`v` is a :class:`~kivy.properties.ObjectProperty`
'''
actionbar = ObjectProperty(None)
'''Reference to the :class:`~kivy.actionbar.ActionBar` instance.
ActionBar is used as a MenuBar to display bunch of menu items.
:data:`actionbar` is a :class:`~kivy.properties.ObjectProperty`
'''
undo_manager = ObjectProperty(UndoManager())
'''Reference to the :class:`~designer.UndoManager` instance.
:data:`undo_manager` is a :class:`~kivy.properties.ObjectProperty`
'''
project_watcher = ObjectProperty(None)
'''Reference to the :class:`~designer.project_watcher.ProjectWatcher`.
:data:`project_watcher` is a :class:`~kivy.properties.ObjectProperty`
'''
project_loader = ObjectProperty(None)
'''Reference to the :class:`~designer.project_loader.ProjectLoader`.
:data:`project_loader` is a :class:`~kivy.properties.ObjectProperty`
'''
proj_settings = ObjectProperty(None)
'''Reference of :class:`~designer.project_settings.ProjectSettings`.
:data:`proj_settings` is a :class:`~kivy.properties.ObjectProperty`
'''
_curr_proj_changed = BooleanProperty(False)
'''Specifies whether current project has been changed inside Kivy Designer
:data:`_curr_proj_changed` is
a :class:`~kivy.properties.BooleanProperty`
'''
_proj_modified_outside = BooleanProperty(False)
'''Specifies whether current project has been changed outside Kivy Designer
:data:`_proj_modified_outside` is a
:class:`~kivy.properties.BooleanProperty`
'''
ui_creator = ObjectProperty(None)
'''Reference to :class:`~designer.ui_creator.UICreator` instance.
:data:`ui_creator` is a :class:`~kivy.properties.ObjectProperty`
'''
designer_content = ObjectProperty(None)
'''Reference to
:class:`~designer.designer_content.DesignerContent` instance.
:data:`designer_content` is a :class:`~kivy.properties.ObjectProperty`
'''
proj_tree_view = ObjectProperty(None)
'''Reference to Project Tree instance
:data:`proj_tree_view` is a :class:`~kivy.properties.ObjectProperty`
'''
designer_settings = ObjectProperty(None)
'''Reference of :class:`~designer.designer_settings.DesignerSettings`.
:data:`designer_settings` is a :class:`~kivy.properties.ObjectProperty`
'''
start_page = ObjectProperty(None)
'''Reference of :class:`~designer.start_page.DesignerStartPage`.
:data:`start_page` is a :class:`~kivy.properties.ObjectProperty`
'''
recent_files_cont_menu = ObjectProperty(None)
'''The context sub menu, containing the recently opened/saved projects.
Reference of :class:`~designer.uix.contextual.ContextSubMenu`.
:data:`recent_files_cont_menu` is a
:class:`~kivy.properties.ObjectProperty`
'''
    def __init__(self, **kwargs):
        '''Build the main window: wire the project watcher/loader pair,
        load persisted designer settings and start the auto-save timer.
        '''
        super(Designer, self).__init__(**kwargs)
        # The watcher calls self.project_modified when files change on disk.
        self.project_watcher = ProjectWatcher(self.project_modified)
        self.project_loader = ProjectLoader(self.project_watcher)
        self.recent_manager = RecentManager()
        self.widget_to_paste = None
        self.designer_content = DesignerContent(size_hint=(1, None))
        self.designer_settings = DesignerSettings()
        self.designer_settings.bind(on_config_change=self._config_change)
        self.designer_settings.load_settings()
        self.designer_settings.bind(on_close=self._cancel_popup)
        # 'auto_save_time' is stored in minutes; schedule_interval takes
        # seconds, hence the *60.
        Clock.schedule_interval(
            self.project_loader.perform_auto_save,
            int(self.designer_settings.config_parser.getdefault(
                'global', 'auto_save_time', 5))*60)
def show_help(self, *args):
'''Event handler for 'on_help' event of self.start_page
'''
self.help_dlg = HelpDialog()
self._popup = Popup(title='Kivy Designer Help', content=self.help_dlg,
size_hint=(0.95, 0.95),
auto_dismiss=False)
self._popup.open()
self.help_dlg.bind(on_cancel=self._cancel_popup)
self.help_dlg.rst.source = 'help.rst'
    def _config_change(self, *args):
        '''Event Handler for 'on_config_change'
        event of self.designer_settings.

        Re-applies the settings that are read once at startup: auto-save
        interval, KV auto-reload flag, and the recent-files limit.
        '''
        # Restart the auto-save timer with the (possibly new) interval;
        # config stores minutes, schedule_interval expects seconds.
        Clock.unschedule(self.project_loader.perform_auto_save)
        Clock.schedule_interval(
            self.project_loader.perform_auto_save,
            int(self.designer_settings.config_parser.getdefault(
                'global', 'auto_save_time', 5))*60)
        self.ui_creator.kv_code_input.reload_kv = \
            bool(self.designer_settings.config_parser.getdefault(
                'global', 'reload_kv', True))
        self.recent_manager.max_recent_files = \
            int(self.designer_settings.config_parser.getdefault(
                'global', 'num_recent_files', 5))
def _add_designer_content(self):
'''Add designer_content to Designer, when a project is loaded
'''
for _child in self.children[:]:
if _child == self.designer_content:
return
self.remove_widget(self.start_page)
self.add_widget(self.designer_content, 1)
self.ids['actn_btn_save'].disabled = False
self.ids['actn_btn_save_as'].disabled = False
self.ids['actn_chk_proj_tree'].disabled = False
self.ids['actn_chk_prop_event'].disabled = False
self.ids['actn_chk_widget_tree'].disabled = False
self.ids['actn_chk_status_bar'].disabled = False
self.ids['actn_chk_kv_lang_area'].disabled = False
self.ids['actn_btn_add_file'].disabled = False
self.ids['actn_btn_custom_widget'].disabled = False
self.ids['actn_btn_proj_pref'].disabled = False
self.ids['actn_btn_run_proj'].disabled = False
def on_statusbar_height(self, *args):
'''Callback for statusbar.height
'''
self.designer_content.y = self.statusbar.height
self.on_height(*args)
def on_actionbar_height(self, *args):
'''Callback for actionbar.height
'''
self.on_height(*args)
def on_height(self, *args):
'''Callback for self.height
'''
if self.actionbar and self.statusbar:
self.designer_content.height = self.height - \
self.actionbar.height - self.statusbar.height
self.designer_content.y = self.statusbar.height
    def project_modified(self, *args):
        '''Event Handler called when Project is modified outside Kivy Designer

        Shows a confirmation dialog offering to reload the project.
        '''
        #To dispatch modified event only once for all files/folders of proj_dir
        if self._proj_modified_outside:
            return
        self._confirm_dlg = ConfirmationDialog(
            message="Current Project has been modified\n"
            "outside the Kivy Designer.\nDo you want to reload project?")
        self._confirm_dlg.bind(on_ok=self._perform_reload,
                               on_cancel=self._cancel_popup)
        self._popup = Popup(title='Kivy Designer', content=self._confirm_dlg,
                            size_hint=(None, None), size=('200pt', '150pt'),
                            auto_dismiss=False)
        self._popup.open()
        # Flag is reset by _perform_reload / _cancel_popup once handled.
        self._proj_modified_outside = True
    def _perform_reload(self, *args):
        '''Perform reload of project after it is modified
        '''
        #Perform reload of project after it is modified
        self._popup.dismiss()
        # Suppress watcher events while we reopen the project, otherwise
        # our own reload would re-trigger project_modified.
        self.project_watcher.allow_event_dispatch = False
        self._perform_open(self.project_loader.proj_dir)
        self.project_watcher.allow_event_dispatch = True
        self._proj_modified_outside = False
def on_show_edit(self, *args):
'''Event Handler of 'on_show_edit' event. This will show EditContView
in ActionBar
'''
if isinstance(self.actionbar.children[0], EditContView):
return
if self.editcontview is None:
self.editcontview = EditContView(
on_undo=self.action_btn_undo_pressed,
on_redo=self.action_btn_redo_pressed,
on_cut=self.action_btn_cut_pressed,
on_copy=self.action_btn_copy_pressed,
on_paste=self.action_btn_paste_pressed,
on_delete=self.action_btn_delete_pressed,
on_selectall=self.action_btn_select_all_pressed,
on_next_screen=self._next_screen,
on_prev_screen=self._prev_screen)
self.actionbar.add_widget(self.editcontview)
widget = self.ui_creator.propertyviewer.widget
if isinstance(widget, Carousel) or\
isinstance(widget, ScreenManager) or\
isinstance(widget, TabbedPanel):
self.editcontview.show_action_btn_screen(True)
else:
self.editcontview.show_action_btn_screen(False)
if self.ui_creator.kv_code_input.clicked:
self._edit_selected = 'KV'
elif self.ui_creator.playground.clicked:
self._edit_selected = 'Play'
else:
self._edit_selected = 'Py'
self.ui_creator.playground.clicked = False
self.ui_creator.kv_code_input.clicked = False
    def _prev_screen(self, *args):
        '''Event handler for 'on_prev_screen' for self.editcontview

        Steps the currently selected container widget back one
        slide/screen/tab.
        '''
        widget = self.ui_creator.propertyviewer.widget
        if isinstance(widget, Carousel):
            widget.load_previous()
        elif isinstance(widget, ScreenManager):
            widget.current = widget.previous()
        elif isinstance(widget, TabbedPanel):
            index = widget.tab_list.index(widget.current_tab)
            if len(widget.tab_list) <= index + 1:
                return
            # NOTE(review): index + 1 for "previous" assumes tab_list is
            # ordered newest-first (Kivy keeps tab_list in reverse order
            # of addition) -- confirm against TabbedPanel docs.
            widget.switch_to(widget.tab_list[index + 1])
    def _next_screen(self, *args):
        '''Event handler for 'on_next_screen' for self.editcontview

        Steps the currently selected container widget forward one
        slide/screen/tab.
        '''
        widget = self.ui_creator.propertyviewer.widget
        if isinstance(widget, Carousel):
            widget.load_next()
        elif isinstance(widget, ScreenManager):
            widget.current = widget.next()
        elif isinstance(widget, TabbedPanel):
            index = widget.tab_list.index(widget.current_tab)
            if index == 0:
                return
            # NOTE(review): index - 1 for "next" mirrors _prev_screen's
            # assumption that tab_list is reverse-ordered -- confirm.
            widget.switch_to(widget.tab_list[index - 1])
    def on_touch_down(self, touch):
        '''Override of FloatLayout.on_touch_down. Used to determine where
        touch is down and to call self.actionbar.on_previous

        Touching outside the action bar while the edit context view is
        shown dismisses that view first.
        '''
        # NOTE(review): super(FloatLayout, self) skips FloatLayout's own
        # handler and dispatches from its base class instead -- looks
        # deliberate, but confirm it is not meant to be
        # super(Designer, self).
        if not isinstance(self.actionbar.children[0], EditContView) or\
                self.actionbar.collide_point(*touch.pos):
            return super(FloatLayout, self).on_touch_down(touch)
        self.actionbar.on_previous(self)
        return super(FloatLayout, self).on_touch_down(touch)
def action_btn_new_pressed(self, *args):
'''Event Handler when ActionButton "New" is pressed.
'''
if not self._curr_proj_changed:
self._show_new_dialog()
return
self._confirm_dlg = ConfirmationDialog('All unsaved changes will be'
' lost.\n'
'Do you want to continue?')
self._confirm_dlg.bind(on_ok=self._show_new_dialog,
on_cancel=self._cancel_popup)
self._popup = Popup(title='New', content=self._confirm_dlg,
size_hint=(None, None), size=('200pt', '150pt'),
auto_dismiss=False)
self._popup.open()
def _show_new_dialog(self, *args):
if hasattr(self, '_popup'):
self._popup.dismiss()
self._new_dialog = NewProjectDialog()
self._new_dialog.bind(on_select=self._perform_new,
on_cancel=self._cancel_popup)
self._popup = Popup(title='New Project', content=self._new_dialog,
size_hint=(None, None), size=('650pt', '450pt'),
auto_dismiss=False)
self._popup.open()
def _perform_new(self, *args):
'''To load new project
'''
if hasattr(self, '_popup'):
self._popup.dismiss()
self.cleanup()
new_proj_dir = os.path.join(get_kivy_designer_dir(),
NEW_PROJECT_DIR_NAME)
if os.path.exists(new_proj_dir):
shutil.rmtree(new_proj_dir)
os.mkdir(new_proj_dir)
template = self._new_dialog.adapter.selection[0].text
kv_file = NEW_PROJECTS[template][0]
py_file = NEW_PROJECTS[template][1]
_dir = os.path.dirname(designer.__file__)
_dir = os.path.split(_dir)[0]
templates_dir = os.path.join(_dir, NEW_TEMPLATES_DIR)
shutil.copy(os.path.join(templates_dir, py_file),
os.path.join(new_proj_dir, "main.py"))
shutil.copy(os.path.join(templates_dir, kv_file),
os.path.join(new_proj_dir, "main.kv"))
self.ui_creator.playground.sandbox.error_active = True
with self.ui_creator.playground.sandbox:
self.project_loader.load_new_project(os.path.join(new_proj_dir,
"main.kv"))
root_wigdet = self.project_loader.get_root_widget()
self.ui_creator.playground.add_widget_to_parent(root_wigdet, None,
from_undo=True)
self.ui_creator.kv_code_input.text = \
self.project_loader.get_full_str()
self.designer_content.update_tree_view(self.project_loader)
self._add_designer_content()
if self.project_loader.class_rules:
for i, _rule in enumerate(self.project_loader.class_rules):
widgets.append((_rule.name, 'custom'))
self.designer_content.toolbox.add_custom()
self.ui_creator.playground.sandbox.error_active = False
    def cleanup(self):
        '''To cleanup everything loaded by the current project before loading
        another project.
        '''
        self.project_loader.cleanup()
        self.ui_creator.cleanup()
        self.undo_manager.cleanup()
        self.designer_content.toolbox.cleanup()
        # Empty the project tree (iterate over a copy while removing).
        for node in self.proj_tree_view.root.nodes[:]:
            self.proj_tree_view.remove_node(node)
        # Drop custom widgets registered by the previous project from the
        # shared module-level `widgets` list.
        for widget in widgets[:]:
            if widget[1] == 'custom':
                widgets.remove(widget)
        self._curr_proj_changed = False
        self.ui_creator.kv_code_input.text = ""
        # Close every Python editor tab except the last (the KV tab).
        self.designer_content.tab_pannel.list_py_code_inputs = []
        for th in self.designer_content.tab_pannel.tab_list[:-1]:
            self.designer_content.tab_pannel.remove_widget(th)
def action_btn_open_pressed(self, *args):
'''Event Handler when ActionButton "Open" is pressed.
'''
if not self._curr_proj_changed:
self._show_open_dialog()
return
self._confirm_dlg = ConfirmationDialog('All unsaved changes will be '
'lost.\n'
'Do you want to continue?')
self._confirm_dlg.bind(on_ok=self._show_open_dialog,
on_cancel=self._cancel_popup)
self._popup = Popup(title='Kivy Designer', content=self._confirm_dlg,
size_hint=(None, None), size=('200pt', '150pt'),
auto_dismiss=False)
self._popup.open()
def _show_open_dialog(self, *args):
'''To show FileBrowser to "Open" a project
'''
if hasattr(self, '_popup'):
self._popup.dismiss()
self._fbrowser = FileBrowser(select_string='Open')
def_path = os.getcwd()
if not self.project_loader.new_project and \
self.project_loader.proj_dir:
def_path = self.project_loader.proj_dir
if self._fbrowser.ids.tabbed_browser.current_tab.text == 'List View':
self._fbrowser.ids.list_view.path = def_path
else:
self._fbrowser.ids.icon_view.path = def_path
self._fbrowser.bind(on_success=self._fbrowser_load,
on_canceled=self._cancel_popup)
self._popup = Popup(title="Open", content=self._fbrowser,
size_hint=(0.9, 0.9), auto_dismiss=False)
self._popup.open()
def _select_class_selected(self, *args):
'''Event Handler for 'on_select' event of self._select_class
'''
selection = self._select_class.listview.adapter.selection[0].text
with self.ui_creator.playground.sandbox:
root_widget = self.project_loader.set_root_widget(selection)
self.ui_creator.playground.add_widget_to_parent(root_widget,
None,
from_undo=True)
self.ui_creator.kv_code_input.text = \
self.project_loader.get_root_str()
self._select_class_popup.dismiss()
def _select_class_cancel(self, *args):
'''Event Handler for 'on_cancel' event of self._select_class
'''
self._select_class_popup.dismiss()
def _fbrowser_load(self, instance):
'''Event Handler for 'on_load' event of self._fbrowser
'''
if instance.selection == []:
return
file_path = instance.selection[0]
self._popup.dismiss()
self._perform_open(file_path)
def _perform_open(self, file_path):
'''To open a project given by file_path
'''
for widget in widgets[:]:
if widget[1] == 'custom':
widgets.remove(widget)
self.cleanup()
self.ui_creator.playground.sandbox.error_active = True
root_widget = None
with self.ui_creator.playground.sandbox:
try:
self.project_loader.load_project(file_path)
if self.project_loader.class_rules:
for i, _rule in enumerate(self.project_loader.class_rules):
widgets.append((_rule.name, 'custom'))
self.designer_content.toolbox.add_custom()
#to test listview
#root_wigdet = None
root_wigdet = self.project_loader.get_root_widget()
if not root_wigdet:
#Show list box showing widgets
self._select_class = SelectClass(
self.project_loader.class_rules)
self._select_class.bind(
on_select=self._select_class_selected,
on_cancel=self._select_class_cancel)
self._select_class_popup = Popup(
title="Select Root Widget",
content=self._select_class,
size_hint=(0.5, 0.5),
auto_dismiss=False)
self._select_class_popup.open()
else:
self.ui_creator.playground.add_widget_to_parent(
root_wigdet, None, from_undo=True)
self.ui_creator.kv_code_input.text = \
self.project_loader.get_full_str()
self.recent_manager.add_file(file_path)
#Record everything for later use
self.project_loader.record()
self.designer_content.update_tree_view(self.project_loader)
self._add_designer_content()
except Exception as e:
self.statusbar.show_message('Cannot load Project: %s' %
(str(e)))
self.ui_creator.playground.sandbox.error_active = False
    def _cancel_popup(self, *args):
        '''EventHandler for all self._popup when self._popup.content
        emits 'on_cancel' or equivalent.
        '''
        # Also clear the "modified outside" flag so the reload prompt can
        # fire again on the next external change.
        self._proj_modified_outside = False
        self._popup.dismiss()
def action_btn_save_pressed(self, *args):
'''Event Handler when ActionButton "Save" is pressed.
'''
if self.project_loader.root_rule:
try:
if self.project_loader.new_project:
self.action_btn_save_as_pressed()
return
else:
self.project_loader.save_project()
projdir = self.project_loader.proj_dir
self.project_loader.cleanup(stop_watcher=False)
self.ui_creator.playground.cleanup()
self.project_loader.load_project(projdir)
root_wigdet = self.project_loader.get_root_widget()
self.ui_creator.playground.add_widget_to_parent(
root_wigdet, None, from_undo=True, from_kv=True)
self._curr_proj_changed = False
self.statusbar.show_message('Project saved successfully')
except:
self.statusbar.show_message('Cannot save project')
def action_btn_save_as_pressed(self, *args):
'''Event Handler when ActionButton "Save As" is pressed.
'''
if self.project_loader.root_rule:
self._curr_proj_changed = False
self._save_as_browser = FileBrowser(select_string='Save')
def_path = os.getcwd()
if not self.project_loader.new_project and \
self.project_loader.proj_dir:
def_path = self.project_loader.proj_dir
if self._save_as_browser.ids.tabbed_browser.current_tab.text == \
'List View':
self._save_as_browser.ids.list_view.path = def_path
else:
self._save_as_browser.ids.icon_view.path = def_path
self._save_as_browser.bind(on_success=self._perform_save_as,
on_canceled=self._cancel_popup)
self._popup = Popup(title="Enter Folder Name",
content=self._save_as_browser,
size_hint=(0.9, 0.9), auto_dismiss=False)
self._popup.open()
def _perform_save_as(self, instance):
'''Event handler for 'on_success' event of self._save_as_browser
'''
if hasattr(self, '_popup'):
self._popup.dismiss()
proj_dir = ''
if instance.ids.tabbed_browser.current_tab.text == 'List View':
proj_dir = instance.ids.list_view.path
else:
proj_dir = instance.ids.icon_view.path
proj_dir = os.path.join(proj_dir, instance.filename)
try:
self.project_loader.save_project(proj_dir)
self.recent_manager.add_file(proj_dir)
projdir = self.project_loader.proj_dir
self.project_loader.cleanup()
self.ui_creator.playground.cleanup()
self.project_loader.load_project(projdir)
root_wigdet = self.project_loader.get_root_widget()
self.ui_creator.playground.add_widget_to_parent(root_wigdet,
None,
from_undo=True)
self.statusbar.show_message('Project saved successfully')
except:
self.statusbar.show_message('Cannot save project')
def action_btn_settings_pressed(self, *args):
'''Event handler for 'on_release' event of
DesignerActionButton "Settings"
'''
self.designer_settings.parent = None
self._popup = Popup(title="Kivy Designer Settings",
content=self.designer_settings,
size_hint=(None, None),
size=(600, 400), auto_dismiss=False)
self._popup.open()
def action_btn_recent_files_pressed(self, *args):
'''Event Handler when ActionButton "Recent Files" is pressed.
'''
pass
def fill_recent_menu(self, *args):
'''Fill self.recent_files_cont_menu with DesignerActionButton
of all Recent Files
'''
recent_menu = self.recent_files_cont_menu
for _file in self.recent_manager.list_files:
act_btn = DesignerActionButton(text=_file, shorten=True)
recent_menu.add_widget(act_btn)
act_btn.bind(on_release=self._recent_file_release)
def _recent_file_release(self, instance, *args):
'''Event Handler for 'on_select' event of self._recent_dlg.
'''
self._perform_open(instance.text)
def action_btn_quit_pressed(self, *args):
'''Event Handler when ActionButton "Quit" is pressed.
'''
App.get_running_app().stop()
def action_btn_undo_pressed(self, *args):
'''Event Handler when ActionButton "Undo" is pressed.
'''
if self._edit_selected == 'Play':
self.undo_manager.do_undo()
elif self._edit_selected == 'KV':
self.ui_creator.kv_code_input.do_undo()
elif self._edit_selected == 'Py':
list_py = self.designer_content.tab_pannel.list_py_code_inputs
for code_input in list_py:
if code_input.clicked is True:
code_input.clicked = False
code_input.do_undo()
def action_btn_redo_pressed(self, *args):
'''Event Handler when ActionButton "Redo" is pressed.
'''
if self._edit_selected == 'Play':
self.undo_manager.do_redo()
elif self._edit_selected == 'KV':
self.ui_creator.kv_code_input.do_redo()
elif self._edit_selected == 'Py':
list_py = self.designer_content.tab_pannel.list_py_code_inputs
for code_input in list_py:
if code_input.clicked is True:
code_input.clicked = False
code_input.do_redo()
def action_btn_cut_pressed(self, *args):
'''Event Handler when ActionButton "Cut" is pressed.
'''
if self._edit_selected == 'Play':
self.ui_creator.playground.do_cut()
elif self._edit_selected == 'KV':
self.ui_creator.kv_code_input.do_cut()
elif self._edit_selected == 'Py':
list_py = self.designer_content.tab_pannel.list_py_code_inputs
for code_input in list_py:
if code_input.clicked is True:
code_input.clicked = False
code_input.do_cut()
def action_btn_copy_pressed(self, *args):
'''Event Handler when ActionButton "Copy" is pressed.
'''
if self._edit_selected == 'Play':
self.ui_creator.playground.do_copy()
elif self._edit_selected == 'KV':
self.ui_creator.kv_code_input.do_copy()
elif self._edit_selected == 'Py':
list_py = self.designer_content.tab_pannel.list_py_code_inputs
for code_input in list_py:
if code_input.clicked is True:
code_input.clicked = False
code_input.do_copy()
def action_btn_paste_pressed(self, *args):
'''Event Handler when ActionButton "Paste" is pressed.
'''
if self._edit_selected == 'Play':
self.ui_creator.playground.do_paste()
elif self._edit_selected == 'KV':
self.ui_creator.kv_code_input.do_paste()
elif self._edit_selected == 'Py':
list_py = self.designer_content.tab_pannel.list_py_code_inputs
for code_input in list_py:
if code_input.clicked is True:
code_input.clicked = False
code_input.do_paste()
def action_btn_delete_pressed(self, *args):
'''Event Handler when ActionButton "Delete" is pressed.
'''
if self._edit_selected == 'Play':
self.ui_creator.playground.do_delete()
elif self._edit_selected == 'KV':
self.ui_creator.kv_code_input.do_delete()
elif self._edit_selected == 'Py':
list_py = self.designer_content.tab_pannel.list_py_code_inputs
for code_input in list_py:
if code_input.clicked is True:
code_input.clicked = False
code_input.do_delete()
def action_btn_select_all_pressed(self, *args):
'''Event Handler when ActionButton "Select All" is pressed.
'''
if self._edit_selected == 'Play':
self.ui_creator.playground.do_select_all()
elif self._edit_selected == 'KV':
self.ui_creator.kv_code_input.do_select_all()
elif self._edit_selected == 'Py':
list_py = self.designer_content.tab_pannel.list_py_code_inputs
for code_input in list_py:
if code_input.clicked is True:
code_input.clicked = False
code_input.do_select_all()
def action_btn_add_custom_widget_press(self, *args):
'''Event Handler when ActionButton "Add Custom Widget" is pressed.
'''
self._custom_browser = FileBrowser(select_string='Add')
self._custom_browser.bind(on_success=self._custom_browser_load,
on_canceled=self._cancel_popup)
self._popup = Popup(title="Add Custom Widget",
content=self._custom_browser,
size_hint=(0.9, 0.9), auto_dismiss=False)
self._popup.open()
def _custom_browser_load(self, instance):
'''Event Handler for 'on_success' event of self._custom_browser
'''
file_path = instance.selection[0]
self._popup.dismiss()
self.ui_creator.playground.sandbox.error_active = True
with self.ui_creator.playground.sandbox:
try:
self.project_loader.add_custom_widget(file_path)
self.designer_content.toolbox.cleanup()
for _rule in (self.project_loader.custom_widgets):
widgets.append((_rule.name, 'custom'))
self.designer_content.toolbox.add_custom()
except ProjectLoaderException as e:
self.statusbar.show_message('Cannot load widget. %s' % str(e))
self.ui_creator.playground.sandbox.error_active = False
def action_chk_btn_toolbox_active(self, chk_btn):
'''Event Handler when ActionCheckButton "Toolbox" is activated.
'''
if chk_btn.checkbox.active:
self._toolbox_parent.add_widget(
self.designer_content.splitter_tree)
self.designer_content.splitter_tree.width = self._toolbox_width
else:
self._toolbox_parent = self.designer_content.splitter_tree.parent
self._toolbox_parent.remove_widget(
self.designer_content.splitter_tree)
self._toolbox_width = self.designer_content.splitter_tree.width
self.designer_content.splitter_tree.width = 0
def action_chk_btn_property_viewer_active(self, chk_btn):
'''Event Handler when ActionCheckButton "Property Viewer" is activated.
'''
if chk_btn.checkbox.active:
self._toggle_splitter_widget_tree()
if self.ui_creator.splitter_widget_tree.parent is None:
self._splitter_widget_tree_parent.add_widget(
self.ui_creator.splitter_widget_tree)
self.ui_creator.splitter_widget_tree.width = \
self._splitter_widget_tree_width
add_tree = False
if self.ui_creator.grid_widget_tree.parent is not None:
add_tree = True
self.ui_creator.splitter_property.size_hint_y = None
self.ui_creator.splitter_property.height = 300
self._splitter_property_parent.clear_widgets()
if add_tree:
self._splitter_property_parent.add_widget(
self.ui_creator.grid_widget_tree)
self._splitter_property_parent.add_widget(
self.ui_creator.splitter_property)
else:
self._splitter_property_parent = \
self.ui_creator.splitter_property.parent
self._splitter_property_parent.remove_widget(
self.ui_creator.splitter_property)
self._toggle_splitter_widget_tree()
def action_chk_btn_widget_tree_active(self, chk_btn):
'''Event Handler when ActionCheckButton "Widget Tree" is activated.
'''
if chk_btn.checkbox.active:
self._toggle_splitter_widget_tree()
add_prop = False
if self.ui_creator.splitter_property.parent is not None:
add_prop = True
self._grid_widget_tree_parent.clear_widgets()
self._grid_widget_tree_parent.add_widget(
self.ui_creator.grid_widget_tree)
if add_prop:
self._grid_widget_tree_parent.add_widget(
self.ui_creator.splitter_property)
self.ui_creator.splitter_property.size_hint_y = None
self.ui_creator.splitter_property.height = 300
else:
self._grid_widget_tree_parent = \
self.ui_creator.grid_widget_tree.parent
self._grid_widget_tree_parent.remove_widget(
self.ui_creator.grid_widget_tree)
self.ui_creator.splitter_property.size_hint_y = 1
self._toggle_splitter_widget_tree()
def _toggle_splitter_widget_tree(self):
'''To show/hide splitter_widget_tree
'''
if self.ui_creator.splitter_widget_tree.parent is not None and\
self.ui_creator.splitter_property.parent is None and\
self.ui_creator.grid_widget_tree.parent is None:
self._splitter_widget_tree_parent = \
self.ui_creator.splitter_widget_tree.parent
self._splitter_widget_tree_parent.remove_widget(
self.ui_creator.splitter_widget_tree)
self._splitter_widget_tree_width = \
self.ui_creator.splitter_widget_tree.width
self.ui_creator.splitter_widget_tree.width = 0
elif self.ui_creator.splitter_widget_tree.parent is None:
self._splitter_widget_tree_parent.add_widget(
self.ui_creator.splitter_widget_tree)
self.ui_creator.splitter_widget_tree.width = \
self._splitter_widget_tree_width
def action_chk_btn_status_bar_active(self, chk_btn):
'''Event Handler when ActionCheckButton "StatusBar" is activated.
'''
if chk_btn.checkbox.active:
self._statusbar_parent.add_widget(self.statusbar)
self.statusbar.height = self._statusbar_height
else:
self._statusbar_parent = self.statusbar.parent
self._statusbar_height = self.statusbar.height
self._statusbar_parent.remove_widget(self.statusbar)
self.statusbar.height = 0
def action_chk_btn_kv_area_active(self, chk_btn):
'''Event Handler when ActionCheckButton "KVLangArea" is activated.
'''
if chk_btn.checkbox.active:
self.ui_creator.splitter_kv_code_input.height = \
self._kv_area_height
self._kv_area_parent.add_widget(
self.ui_creator.splitter_kv_code_input)
else:
self._kv_area_parent = \
self.ui_creator.splitter_kv_code_input.parent
self._kv_area_height = \
self.ui_creator.splitter_kv_code_input.height
self.ui_creator.splitter_kv_code_input.height = 0
self._kv_area_parent.remove_widget(
self.ui_creator.splitter_kv_code_input)
def _error_adding_file(self, *args):
'''Event Handler for 'on_error' event of self._add_file_dlg
'''
self.statusbar.show_message('Error while adding file to project')
self._popup.dismiss()
def _added_file(self, *args):
'''Event Handler for 'on_added' event of self._add_file_dlg
'''
self.statusbar.show_message('File successfully added to project')
self._popup.dismiss()
if self._add_file_dlg.target_file[3:] == '.py':
self.designer_content.add_file_to_tree_view(
self._add_file_dlg.target_file)
def action_btn_add_file_pressed(self, *args):
'''Event Handler when ActionButton "Add File" is pressed.
'''
self._add_file_dlg = AddFileDialog(self.project_loader)
self._add_file_dlg.bind(on_added=self._added_file,
on_error=self._error_adding_file,
on_cancel=self._cancel_popup)
self._popup = Popup(title="Add File",
content=self._add_file_dlg,
size_hint=(None, None),
size=(400, 300), auto_dismiss=False)
self._popup.open()
def action_btn_project_pref_pressed(self, *args):
'''Event Handler when ActionButton "Project Prefences" is pressed.
'''
self.proj_settings = ProjectSettings(proj_loader=self.project_loader)
self.proj_settings.load_proj_settings()
self.proj_settings.bind(on_close=self._cancel_popup)
self._popup = Popup(title="Project Preferences",
content=self.proj_settings,
size_hint=(None, None),
size=(600, 400), auto_dismiss=False)
self._popup.open()
def action_btn_run_project_pressed(self, *args):
'''Event Handler when ActionButton "Run" is pressed.
'''
if self.project_loader.file_list == []:
return
args = ''
envs = ''
python_path = self.designer_settings.config_parser.getdefault(
'global', 'python_shell_path', '')
if python_path == '':
self.statusbar.show_message("Python Shell Path not specified,"
" please specify it before running"
" project")
return
if self.proj_settings and self.proj_settings.config_parser:
args = self.proj_settings.config_parser.getdefault('arguments',
'arg', '')
envs = self.proj_settings.config_parser.getdefault(
'env variables', 'env', '')
for env in envs.split(' '):
self.ui_creator.kivy_console.environment[
env[:env.find('=')]] = env[env.find('=')+1:]
for _file in self.project_loader.file_list:
if 'main.py' in os.path.basename(_file):
self.ui_creator.kivy_console.stdin.write(
'"%s" "%s" %s' % (python_path, _file, args))
self.ui_creator.tab_pannel.switch_to(
self.ui_creator.tab_pannel.tab_list[2])
return
self.ui_creator.kivy_console.stdin.write(
'"%s" "%s" %s' % (python_path, self.project_loader._app_file, args))
self.ui_creator.tab_pannel.switch_to(
self.ui_creator.tab_pannel.tab_list[2])
    def on_sandbox_getting_exception(self, *args):
        '''Event Handler for
        :class:`~designer.uix.designer_sandbox.DesignerSandbox`
        on_getting_exception event. This function will add exception
        string in error_console.
        '''
        # Format the sandboxed traceback and append it to the error console.
        s = traceback.format_list(traceback.extract_tb(
            self.ui_creator.playground.sandbox.tb))
        s = '\n'.join(s)
        to_insert = "Exception:\n" + s + '\n' + \
            "{!r}".format(self.ui_creator.playground.sandbox.exception)
        text = self.ui_creator.error_console.text + to_insert + '\n\n'
        self.ui_creator.error_console.text = text
        # While error_active is set, bring the error console tab to front.
        if self.ui_creator.playground.sandbox.error_active:
            self.ui_creator.tab_pannel.switch_to(
                self.ui_creator.tab_pannel.tab_list[0])
        self.ui_creator.playground.sandbox.error_active = False
def action_btn_about_pressed(self, *args):
'''Event handler for 'on_release' event of DesignerActionButton
"About Kivy Designer"
'''
self.about_dlg = AboutDialog()
self._popup = Popup(title='About Kivy Designer',
content=self.about_dlg,
size_hint=(None, None), size=(600, 400),
auto_dismiss=False)
self._popup.open()
self.about_dlg.bind(on_cancel=self._cancel_popup)
class DesignerApp(App):
    # Kivy App entry point for the Designer: builds the root widget,
    # wires the UI-creator subtree together, and tracks widget focus.

    widget_focused = ObjectProperty(allownone=True)
    '''Currently focused widget
    '''

    title = 'Kivy Designer'

    def on_stop(self, *args):
        # Shut down the embedded Python console when the app exits.
        self.root.ui_creator.py_console.exit()

    def build(self):
        # Register all Designer widget classes with the Kivy Factory so kv
        # rules can instantiate them by name before the root is created.
        Factory.register('Playground', module='designer.playground')
        Factory.register('Toolbox', module='designer.toolbox')
        Factory.register('StatusBar', module='designer.statusbar')
        Factory.register('PropertyViewer', module='designer.propertyviewer')
        Factory.register('EventViewer', module='designer.eventviewer')
        Factory.register('WidgetsTree', module='designer.nodetree')
        Factory.register('UICreator', module='designer.ui_creator')
        Factory.register('DesignerContent',
                         module='designer.designer_content')
        Factory.register('KivyConsole', module='designer.uix.kivy_console')
        Factory.register('PythonConsole', module='designer.uix.py_console')
        # NOTE(review): 'DesignerContent' is registered a second time here,
        # pointing at designer.uix.designer_sandbox -- looks like it was
        # meant to register 'DesignerSandbox'; confirm.
        Factory.register('DesignerContent',
                         module='designer.uix.designer_sandbox')
        Factory.register('EventDropDown', module='designer.eventviewer')
        Factory.register('DesignerActionPrevious',
                         module='designer.uix.designer_action_items')
        Factory.register('DesignerActionGroup',
                         module='designer.uix.designer_action_items')
        Factory.register('DesignerActionButton',
                         module='designer.uix.designer_action_items')
        Factory.register('DesignerActionSubMenu',
                         module='designer.uix.designer_action_items')
        Factory.register('DesignerStartPage', module='designer.start_page')
        Factory.register('DesignerLinkLabel', module='designer.start_page')
        Factory.register('RecentFilesBox', module='designer.start_page')
        Factory.register('ContextMenu', module='designer.uix.contextual')
        self._widget_focused = None
        self.root = Designer()
        # Defer cross-widget wiring to the next frame, when the widget tree
        # built above is fully constructed.
        Clock.schedule_once(self._setup)

    def _setup(self, *args):
        '''To setup the properties of different classes
        '''
        # Cross-link the project loader, UI creator, status bar and undo
        # manager so each collaborator can reach the pieces it drives.
        self.root.proj_tree_view = self.root.designer_content.tree_view
        self.root.ui_creator = self.root.designer_content.ui_creator
        self.root.statusbar.playground = self.root.ui_creator.playground
        self.root.project_loader.kv_code_input = \
            self.root.ui_creator.kv_code_input
        self.root.project_loader.tab_pannel = \
            self.root.designer_content.tab_pannel
        self.root.ui_creator.playground.undo_manager = self.root.undo_manager
        self.root.ui_creator.kv_code_input.project_loader = \
            self.root.project_loader
        self.root.ui_creator.kv_code_input.statusbar = self.root.statusbar
        self.root.ui_creator.widgettree.project_loader = \
            self.root.project_loader
        self.root.ui_creator.eventviewer.project_loader = \
            self.root.project_loader
        self.root.ui_creator.eventviewer.designer_tabbed_panel = \
            self.root.designer_content.tab_pannel
        self.root.ui_creator.eventviewer.statusbar = self.root.statusbar
        self.root.statusbar.bind(height=self.root.on_statusbar_height)
        self.root.actionbar.bind(height=self.root.on_actionbar_height)
        # User widgets run inside a sandbox overlaid on the playground so
        # their exceptions can be caught and reported instead of crashing.
        self.root.ui_creator.playground.sandbox = DesignerSandbox()
        self.root.ui_creator.playground.add_widget(
            self.root.ui_creator.playground.sandbox)
        self.root.ui_creator.playground.sandbox.pos = \
            self.root.ui_creator.playground.pos
        self.root.ui_creator.playground.sandbox.size = \
            self.root.ui_creator.playground.size
        self.root.start_page.recent_files_box.root = self.root
        self.root.ui_creator.playground.sandbox.bind(
            on_getting_exception=self.root.on_sandbox_getting_exception)
        # Keep property/event viewers pointed at whichever widget has focus.
        self.bind(widget_focused=
                  self.root.ui_creator.propertyviewer.setter('widget'))
        self.bind(widget_focused=
                  self.root.ui_creator.eventviewer.setter('widget'))
        self.focus_widget(self.root.ui_creator.playground.root)
        self.create_kivy_designer_dir()
        self.root.start_page.recent_files_box.add_recent(
            self.root.recent_manager.list_files)
        self.root.fill_recent_menu()

    def create_kivy_designer_dir(self):
        '''To create the ~/.kivy-designer dir
        '''
        if not os.path.exists(get_kivy_designer_dir()):
            os.mkdir(get_kivy_designer_dir())

    def create_draggable_element(self, widgetname, touch, widget=None):
        '''Create PlagroundDragElement and make it draggable
        until the touch is released also search default args if exist
        '''
        container = None
        if not widget:
            default_args = {}
            # NOTE(review): this picks options[2] from the *last* long entry
            # in `widgets`, regardless of widgetname -- presumably it should
            # match the entry for widgetname; confirm.
            for options in widgets:
                if len(options) > 2:
                    default_args = options[2]
            container = self.root.ui_creator.playground.\
                get_playground_drag_element(widgetname, touch, **default_args)
        else:
            container = PlaygroundDragElement(
                playground=self.root.ui_creator.playground, child=widget)
            touch.grab(container)
            touch.grab_current = container
            container.on_touch_move(touch)
            container.center_x = touch.x
            container.y = touch.y + 20
        if container:
            self.root.add_widget(container)
        else:
            self.root.statusbar.show_message("Cannot create %s" % widgetname)
        # NOTE(review): reached even when container is None (after the
        # show_message branch), which would raise AttributeError -- confirm
        # whether an early return is missing.
        container.widgettree = self.root.ui_creator.widgettree
        return container

    def focus_widget(self, widget, *largs):
        '''Called when a widget is select in Playground. It will also draw
        lines around focussed widget.
        '''
        # Remove the highlight instructions from the previously focused
        # widget when focus is cleared or moves to a different widget.
        if self._widget_focused and (widget is None or
                                     self._widget_focused[0] != widget):
            fwidget = self._widget_focused[0]
            for instr in self._widget_focused[1:]:
                fwidget.canvas.after.remove(instr)
            self._widget_focused = []
        self.widget_focused = widget
        self.root.ui_creator.widgettree.refresh()
        if not widget:
            return
        # Outline rectangle around the focused widget: either move the
        # existing Line or draw a fresh Color/Line pair on canvas.after.
        x, y = widget.pos
        right, top = widget.right, widget.top
        points = [x, y, right, y, right, top, x, top]
        if self._widget_focused:
            line = self._widget_focused[2]
            line.points = points
        else:
            from kivy.graphics import Color, Line
            with widget.canvas.after:
                color = Color(.42, .62, .65)
                line = Line(points=points, close=True, width=2.)
            self._widget_focused = [widget, color, line]
        self.root.ui_creator.playground.clicked = True
        self.root.on_show_edit()
| {
"repo_name": "5y/kivy-designer",
"path": "designer/app.py",
"copies": "1",
"size": "50457",
"license": "mit",
"hash": 831065526928783400,
"line_mean": 38.0836560806,
"line_max": 80,
"alpha_frac": 0.5884416434,
"autogenerated": false,
"ratio": 3.9943793540215324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5082820997421532,
"avg_score": null,
"num_lines": null
} |
__all__ = ['detect_stream',
'detect',
'dist_to_reference',
'dist',
'prob_class']
import numpy as np
def detect_stream(s_inf, N_obs, R_pos, R_neg, gamma=1, theta=1, D_req=1):
    """Algorithm 1.1

    Perform online binary classification on the infinite stream s_inf using
    sets of positive and negative reference signals R_pos and R_neg.

    Slides a window of length N_obs over s_inf and returns the first index
    at which D_req consecutive positive detections have occurred, or None
    when the stream is exhausted first.
    """
    run_length = 0
    # The first index with a full window behind it is N_obs - 1.
    for i in range(N_obs - 1, len(s_inf)):
        window = s_inf[(i - N_obs + 1):(i + 1)]
        if detect(window, R_pos, R_neg, gamma, theta):
            run_length += 1
            if run_length >= D_req:
                return i
        else:
            run_length = 0
    return None
def detect(s, R_pos, R_neg, gamma=1, theta=1):
    """Algorithm 1.2

    Perform binary classification on the signal s using sets of positive and
    negative reference signals R_pos and R_neg.

    Returns the positive/negative likelihood ratio when theta is None,
    otherwise the boolean result of comparing the ratio against theta.
    """
    p_pos = prob_class(dist_to_reference(s, R_pos), gamma)
    p_neg = prob_class(dist_to_reference(s, R_neg), gamma)
    ratio = p_pos / p_neg
    return ratio if theta is None else ratio > theta
def dist_to_reference(s, r):
    """Algorithm 2

    Compute, for every reference signal (row) in r, the minimum distance
    between s and all windows of r of the same length as s.
    """
    N_obs = s.shape[0]
    N_ref = r.shape[1]
    best = None
    for offset in range(N_ref - N_obs + 1):
        window_dists = dist(r[:, offset:(offset + N_obs)], s)
        best = window_dists if best is None else np.fmin(best, window_dists)
    return best
def dist(s, t):
    """Algorithm 3

    Compute the row-wise squared Euclidean distance between two signals s
    and t of the same length.
    """
    diff = s - t
    return (diff * diff).sum(axis=1)
def prob_class(Dists, gamma=1):
    """Algorithm 4

    Using the distances Dists of an observation to the reference signals of
    a certain class, compute a number proportional to the probability that
    the observation belongs to that class (a Gaussian-like kernel sum).
    """
    return np.exp(-gamma * Dists).sum()
| {
"repo_name": "norbert/hearsay",
"path": "hearsay/algorithms/nikolov.py",
"copies": "1",
"size": "2185",
"license": "mit",
"hash": -5165373796406067000,
"line_mean": 24.1149425287,
"line_max": 77,
"alpha_frac": 0.576201373,
"autogenerated": false,
"ratio": 3.5355987055016183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4611800078501618,
"avg_score": null,
"num_lines": null
} |
__all__ = ["DeviceManager"]
from adb_impl import AdbImpl
from android_device import AndroidDevice
from status import *
from base.log import VLOG
class DeviceManager(object):
    """Hands out locked devices to sessions, guaranteeing that a device is
    never reassigned while it is in use."""

    def __init__(self, xdb):
        # xdb: debug-bridge implementation (e.g. AdbImpl) used to enumerate
        # devices; passed through to every AndroidDevice created here.
        self.xdb = xdb
        # Serials of devices currently locked (acquired but not released).
        self.active_devices = []

    # Returns a device which will not be reassigned during its lifetime.
    def AcquireDevice(self):
        """Lock and return the first online device not already in use.

        Returns a (Status, AndroidDevice-or-None) tuple.
        """
        devices = []
        status = self.xdb.GetDevices(devices)
        if status.IsError():
            return (status, None)
        if not devices:
            return (Status(kUnknownError, "There are no devices online"), None)
        for it in devices:
            # Fix: the old code unlocked and re-assigned an in-use device,
            # contradicting the no-reassignment guarantee above. Skip locked
            # devices and only hand out a free one.
            if not self.IsDeviceLocked(it):
                return (Status(kOk), self.LockDevice(it))
        return (Status(kUnknownError, "All devices are in use (" +
                       str(len(devices)) + " online)"), None)

    # Returns a device with the same guarantees as AcquireDevice, but fails
    # if the device with the given serial number is not avaliable.
    def AcquireSpecificDevice(self, device_serial):
        """Lock and return the device with the given serial.

        Returns a (Status, AndroidDevice-or-None) tuple; fails when the
        device is offline or already locked.
        """
        devices = []
        status = self.xdb.GetDevices(devices)
        if status.IsError():
            return (status, None)
        if device_serial not in devices:
            return (Status(kUnknownError, "Device " + device_serial +
                           " is not online"), None)
        if self.IsDeviceLocked(device_serial):
            # Fix: the old code stored a (Status, None) tuple in |status| and
            # then returned the unbound name |device|, raising NameError.
            return (Status(kUnknownError, "Device " + device_serial +
                           " is already in use"), None)
        return (Status(kOk), self.LockDevice(device_serial))

    def ReleaseDevice(self, device_serial):
        """Unlock a previously acquired device (also used as the
        AndroidDevice release callback)."""
        self.active_devices.remove(device_serial)

    def LockDevice(self, device_serial):
        """Mark the device as in use and wrap it in an AndroidDevice."""
        self.active_devices.append(device_serial)
        # The bridge type is chosen by whoever constructed this manager;
        # both branches of the old isinstance(self.xdb, AdbImpl) check
        # returned the identical AndroidDevice, so the check was dropped.
        return AndroidDevice(device_serial, self.xdb, self.ReleaseDevice)

    def IsDeviceLocked(self, device_serial):
        """True when the serial is currently locked by some session."""
        return device_serial in self.active_devices
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/device_manager.py",
"copies": "1",
"size": "2164",
"license": "bsd-3-clause",
"hash": -4348556971932496400,
"line_mean": 32.2923076923,
"line_max": 95,
"alpha_frac": 0.6797597043,
"autogenerated": false,
"ratio": 3.7964912280701753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49762509323701754,
"avg_score": null,
"num_lines": null
} |
__all__ = ["DevToolsClientImpl"]
import copy
import json
import yaml
import time
from status import *
from devtools_client import DevToolsClient
from base.bind import Bind
from base.log import VLOG
from net.websocket_factory import WebsocketFactory
from third_party.websocket_client.websocket import WebSocketConnectionClosedException
from third_party.websocket_client.websocket import WebSocketTimeoutException
# InspectorMessageType
# Discriminator values for messages read off the DevTools websocket:
# browser-initiated notifications vs. responses to commands we sent.
kEventMessageType = 0
kCommandResponseMessageType = 1
# Error message DevTools returns when an execution-context id is stale;
# mapped to kNoSuchExecutionContext by _ParseInspectorError.
kInspectorContextError = "Execution context with given id not found."
class InspectorMessageType(object):
    """Mutable holder for the discriminator of a parsed inspector message;
    filled in as an out-parameter by the message parser."""

    def __init__(self, typer=0):
        # typer: kEventMessageType or kCommandResponseMessageType.
        self.typer = typer
class InspectorEvent(object):
    """An inspector notification: a method name plus its params payload."""

    def __init__(self, method="", params=None):
        self.method = method
        # Fix: default to a fresh dict per instance; the old `params={}`
        # default was one shared dict mutated across every event using it.
        self.params = params if params is not None else {}
class InspectorCommandResponse(object):
    """Response to a DevTools command: the command id plus either an error
    string (JSON) or a result dict."""

    def __init__(self, sid="", error="", result=None):
        self.sid = sid
        self.error = error
        # Fix: fresh dict per instance; the old `result={}` default was a
        # single shared dict mutated across responses using the default.
        self.result = result if result is not None else {}
# ResponseState
# Lifecycle states for ResponseInfo.state (one per in-flight command).
# The client is waiting for the response.
kWaiting = 0
# The command response will not be received because it is blocked by an
# alert that the command triggered.
kBlocked = 1
# The client no longer cares about the response.
kIgnored = 2
# The response has been received.
kReceived = 3
class ResponseInfo(object):
    """Bookkeeping for one in-flight DevTools command: its method name, the
    current ResponseState, and the response once it has arrived."""

    def __init__(self, method=""):
        self.method = method
        self.state = kWaiting
        self.response = InspectorCommandResponse()

    def Update(self, other):
        """Copy every field from another ResponseInfo into this one."""
        self.method = other.method
        self.state = other.state
        self.response = other.response
def _ParseInspectorError(error_json):
    """Translate an inspector "error" payload into a Status.

    The payload is always produced by json.dumps in _ParseInspectorMessage,
    so it is parsed with json.loads instead of the previous yaml.load
    (which is also unsafe on untrusted input). Returns
    kNoSuchExecutionContext for the well-known stale-context message,
    otherwise kUnknownError carrying the raw payload.
    """
    try:
        error_dict = json.loads(error_json)
    except ValueError:
        # Mirrors the old behavior for unparseable payloads: fall through
        # to the generic "no error_message" status below.
        error_dict = None
    if type(error_dict) != dict:
        return Status(kUnknownError, "inspector error with no error_message")
    error_message = error_dict.get("message")
    # A plain equality test suffices (and also matches a unicode message,
    # which json.loads yields on Python 2).
    if error_message == kInspectorContextError:
        return Status(kNoSuchExecutionContext)
    return Status(kUnknownError, "unhandled inspector error: " + error_json)
class _ScopedIncrementer(object):
    """RAII-style helper: increments a shared one-element counter list on
    construction and decrements it when the instance is destroyed.

    NOTE(review): the caller must bind the instance to a local name for the
    duration of the scope. An unbound temporary (as created in
    DevToolsClientImpl._ProcessNextMessage) is destroyed immediately under
    CPython refcounting, undoing the increment right away -- confirm intent.
    """
    def __init__(self, count):
        # focus: count must be mutable object
        self.reference = count
        self.reference[0] += 1
    def __del__(self):
        self.reference[0] -= 1
# return status and is_condition_met
def _ConditionIsMet():
    """Trivial HandleEventsUntil predicate that is always satisfied."""
    return Status(kOk), True
def _ParseInspectorMessage(message, expected_id, message_type, event, command_response):
    """Parse one raw websocket message into either an event or a command
    response.

    message_type, event and command_response are out-parameters mutated in
    place; returns True when the message was recognised. expected_id is not
    consulted by the parser itself.
    """
    # NOTE(review): DevTools wire messages are JSON; yaml.load also parses
    # JSON but is unsafe on untrusted input -- consider json.loads.
    message_dict = yaml.load(message)
    if message == "" or type(message_dict) != dict:
        return False
    # dict.has_key() is Python 2 only; this module targets Python 2.
    if not message_dict.has_key("id"):
        # No "id" key => a browser-initiated notification (event).
        method = message_dict.get("method")
        if type(method) != str:
            return False
        params = message_dict.get("params", {})
        message_type.typer = kEventMessageType
        event.method = method
        event.params = params
        return True
    elif type(message_dict["id"]) == int:
        # Integer "id" => a response to a command we issued earlier.
        sid = message_dict["id"]
        unscoped_error = message_dict.get("error")
        unscoped_result = message_dict.get("result")
        if type(unscoped_error) != dict and type(unscoped_result) != dict:
            return False
        message_type.typer = kCommandResponseMessageType
        command_response.sid = sid
        if unscoped_result:
            command_response.result = unscoped_result
        else:
            # Keep the error as a JSON string; _ParseInspectorError decodes it.
            command_response.error = json.dumps(unscoped_error)
        return True
    return False
class DevToolsClientImpl(DevToolsClient):
    """Concrete DevToolsClient speaking the DevTools wire protocol over a
    websocket: sends JSON commands, demultiplexes incoming events and
    command responses, and fans notifications out to registered listeners.
    """

    def __init__(self, factory, url, sid, frontend_closer_func, parser_func=Bind(_ParseInspectorMessage)):
        # NOTE(review): the default parser_func Bind(...) is created once at
        # def time and shared (and mutated via .Update in
        # _ProcessNextMessage) across all instances that use the default --
        # confirm this is safe for interleaved use.
        DevToolsClient.__init__(self)
        self.socket = factory.create_connection()
        self.url = url
        # Set True once Inspector.targetCrashed is observed.
        self.crashed = False
        self.sid = sid
        # Callable used to close a DevTools frontend that blocks reconnects.
        self.frontend_closer_func = frontend_closer_func
        self.parser_func = parser_func
        # Monotonically increasing DevTools command id.
        self.next_id = 1
        # stack_count must be a list
        self.stack_count = [0,]
        self.listeners = []
        # Listener queues drained by the _EnsureListenersNotifiedOf* helpers.
        self.unnotified_connect_listeners = []
        self.unnotified_event_listeners = []
        self.unnotified_cmd_response_listeners = []
        self.unnotified_event = InspectorEvent()
        self.unnotified_cmd_response_info = ResponseInfo()
        # Maps command id -> ResponseInfo for every in-flight command.
        self.response_info_map = {}

    def Update(self, other):
        """Copy every field from another DevToolsClientImpl into this one."""
        self.socket = other.socket
        self.url = other.url
        self.crashed = other.crashed
        self.sid = other.sid
        self.frontend_closer_func = other.frontend_closer_func
        self.parser_func = other.parser_func
        self.next_id = other.next_id
        # stack_count must be a list
        self.stack_count = other.stack_count
        self.listeners = other.listeners
        self.unnotified_connect_listeners = other.unnotified_connect_listeners
        self.unnotified_event_listeners = other.unnotified_event_listeners
        self.unnotified_cmd_response_listeners = other.unnotified_cmd_response_listeners
        self.unnotified_event = other.unnotified_event
        self.unnotified_cmd_response_info = other.unnotified_cmd_response_info
        self.response_info_map = other.response_info_map

    def _SetParserFuncForTesting(self, parser_func):
        # Test hook: swap in a stub parser.
        self.parser_func = parser_func
        return

    # Overridden from DevToolsClient:
    def GetId(self):
        """Return the DevTools target id this client is attached to."""
        return self.sid

    def WasCrashed(self):
        """True once the target has reported Inspector.targetCrashed."""
        return self.crashed

    def ConnectIfNecessary(self):
        """Connect the websocket if it is not connected yet, closing a
        blocking DevTools frontend and retrying once if needed. Notifies
        listeners of the new connection. Returns a Status."""
        if self.stack_count[0]:
            return Status(kUnknownError, "cannot connect when nested")
        if self.socket.connected:
            return Status(kOk)
        self.socket.connect(self.url)
        if not self.socket.connected:
            # Try to close devtools frontend and then reconnect.
            status = self.frontend_closer_func.Run()
            if status.IsError():
                return status
            self.socket.connect(self.url)
            if not self.socket.connected:
                return Status(kDisconnected, "unable to connect to renderer")
        # shallow copy
        self.unnotified_connect_listeners = copy.copy(self.listeners)
        self.unnotified_event_listeners = []
        self.response_info_map.clear()
        #TODO (wyh)
        # Notify all listeners of the new connection. Do this now so that any errors
        # that occur are reported now instead of later during some unrelated call.
        # Also gives listeners a chance to send commands before other clients.
        return self._EnsureListenersNotifiedOfConnect()

    def SendCommand(self, method, params):
        """Send a command, discarding its result. Returns a Status."""
        status = self._SendCommandInternal(method, params, {})
        return status

    def SendCommandAndGetResult(self, method, params, result):
        """Send a command and copy its "result" payload into |result|."""
        intermediate_result = {}
        status = self._SendCommandInternal(method, params, intermediate_result)
        if status.IsError():
            # NOTE(review): the trailing comma makes this return the 1-tuple
            # (status,) while every other path returns a bare Status --
            # confirm which form callers expect.
            return status,
        if not intermediate_result:
            return Status(kUnknownError, "inspector response missing result")
        result.clear()
        result.update(intermediate_result)
        return status

    def AddListener(self, listener):
        """Register a listener for connect/event/command notifications."""
        self.listeners.append(listener)

    def HandleEventsUntil(self, conditional_func, timeout):
        """Pump incoming messages until conditional_func reports the
        condition met or |timeout| seconds elapse. Returns a Status."""
        if not self.socket.connected:
            return Status(kDisconnected, "not connected to DevTools")
        deadline = time.time() + timeout
        next_message_timeout = timeout
        while True:
            # Only poll the condition when no buffered data is pending, so
            # already-received messages are drained first.
            # NOTE(review): _recv_buffer is a private attribute of the
            # bundled websocket client -- confirm it is stable.
            if not self.socket._recv_buffer:
                is_condition_met = False
                (status, is_condition_met) = conditional_func.Run()
                if status.IsError():
                    return status
                if is_condition_met:
                    return Status(kOk)
            status = self._ProcessNextMessage(-1, next_message_timeout)
            if status.IsError():
                return status
            next_message_timeout = deadline - time.time()

    def HandleReceivedEvents(self):
        """Drain any already-received events without waiting."""
        return self.HandleEventsUntil(Bind(_ConditionIsMet), 0)

    def _SendCommandInternal(self, method, params, result):
        """Send one JSON command and block until its response arrives,
        copying the response's result dict into |result|."""
        if not self.socket.connected:
            return Status(kDisconnected, "not connected to DevTools")
        command_id = self.next_id
        self.next_id += 1
        command = {'id': command_id, 'method': method, 'params': params}
        message = json.dumps(command)
        try:
            self.socket.send(message)
        except WebSocketConnectionClosedException:
            err = "unable to send message to renderer"
            VLOG(3, err)
            return Status(kDisconnected, err)
        except:
            err = "unknown reason of socket sending failure"
            VLOG(3, err)
            return Status(kDisconnected, err)
        response_info = ResponseInfo(method)
        self.response_info_map[command_id] = response_info
        # Pump messages until this command's response arrives (or fails).
        while (self.response_info_map[command_id].state) == kWaiting:
            status = self._ProcessNextMessage(command_id, 600)
            if status.IsError():
                if self.response_info_map[command_id].state == kReceived:
                    del self.response_info_map[command_id]
                return status
        if self.response_info_map[command_id].state == kBlocked:
            # An alert opened by this command is blocking its response.
            self.response_info_map[command_id].state = kIgnored
            return Status(kUnexpectedAlertOpen)
        response = self.response_info_map[command_id].response
        if type(response.result) != dict:
            # No result dict => DevTools reported an error payload.
            return _ParseInspectorError(response.error)
        result.clear()
        result.update(response.result)
        return Status(kOk)

    def _ProcessNextMessage(self, expected_id, timeout):
        """Receive and dispatch one message from the websocket.

        expected_id is the command id the caller waits on (-1 when merely
        draining events); timeout is the receive timeout in seconds.
        """
        # NOTE(review): this temporary is never bound to a name, so under
        # CPython refcounting it is destroyed (and the counter decremented)
        # immediately -- the increment does not actually span this call.
        _ScopedIncrementer(self.stack_count)
        # Flush any notifications queued by earlier (possibly nested) calls.
        status = self._EnsureListenersNotifiedOfConnect()
        if status.IsError():
            return status
        status = self._EnsureListenersNotifiedOfEvent()
        if status.IsError():
            return status
        status = self._EnsureListenersNotifiedOfCommandResponse()
        if status.IsError():
            return status
        # The command response may have already been received or blocked while notifying listeners.
        if expected_id != -1 and self.response_info_map[expected_id].state != kWaiting:
            return Status(kOk)
        if self.crashed:
            return Status(kTabCrashed)
        try:
            self.socket.settimeout(timeout)
            message = self.socket.recv()
        except WebSocketConnectionClosedException:
            err = "Unable to receive message from renderer"
            VLOG(3, err)
            return Status(kDisconnected, err)
        except WebSocketTimeoutException:
            err = "Timed out receiving message from renderer: " + str(timeout)
            VLOG(3, err)
            return Status(kTimeout, err)
        except:
            err = "unknown reason of socket receiving failure"
            VLOG(3, err)
            return Status(kDisconnected, err)
        # VLOG(0, "string from recv buffer of websocket: %s" % message)
        message_type = InspectorMessageType()
        event = InspectorEvent()
        response = InspectorCommandResponse()
        # parser_func fills message_type/event/response as out-parameters.
        self.parser_func.Update([message, expected_id, message_type, event, response])
        re = self.parser_func.Run()
        if re == False:
            VLOG(3, "Bad inspector message: " + message)
            return Status(kUnknownError, "bad inspector message: " + message)
        if message_type.typer == kEventMessageType:
            return self._ProcessEvent(event)
        return self._ProcessCommandResponse(response)

    def _ProcessEvent(self, event):
        """Dispatch one inspector event to listeners and handle the special
        detach/crash/dialog events."""
        VLOG(0, "DEVTOOLS EVENT " + event.method + " " + str(event.params))
        self.unnotified_event_listeners = copy.copy(self.listeners)
        self.unnotified_event = event
        status = self._EnsureListenersNotifiedOfEvent()
        self.unnotified_event = InspectorEvent()
        if status.IsError():
            return status
        if event.method == "Inspector.detached":
            return Status(kDisconnected, "received Inspector.detached event")
        if event.method == "Inspector.targetCrashed":
            self.crashed = True
            return Status(kTabCrashed)
        if event.method == "Page.javascriptDialogOpening":
            #A command may have opened the dialog, which will block the response.
            #To find out which one (if any), do a round trip with a simple command
            #to the renderer and afterwards see if any of the commands still haven't
            #received a response.
            #This relies on the fact that DevTools commands are processed
            #sequentially. This may break if any of the commands are asynchronous.
            #If for some reason the round trip command fails, mark all the waiting
            #commands as blocked and return the error. This is better than risking
            #a hang.
            max_id = self.next_id
            enable_params = {"purpose": "detect if alert blocked any cmds"}
            enable_status = self.SendCommand("Inspector.enable", enable_params)
            # dict.iteritems() is Python 2 only.
            for cur_id, response in self.response_info_map.iteritems():
                if cur_id > max_id:
                    continue
                if response.state == kWaiting:
                    response.state = kBlocked
            # NOTE(review): this returns |status| (from the listener
            # notification above) -- presumably it should return
            # |enable_status|; confirm.
            if enable_status.IsError():
                return status
        return Status(kOk)

    def _ProcessCommandResponse(self, response):
        """Record one command response and notify listeners of success."""
        response_info = self.response_info_map.get(response.sid, None)
        if None == response_info:
            return Status(kUnknownError, "unexpected command response")
        else:
            method = response_info.method
            if response.result:
                result = str(response.result)
            else:
                result = response.error
            VLOG(0, "DEVTOOLS RESPONSE " + method + " (id= " + str(response.sid) + ") " + result)
        if response_info.state == kReceived:
            return Status(kUnknownError, "received multiple command responses")
        if response_info.state == kIgnored:
            # Caller gave up on this command (e.g. blocked by an alert).
            del self.response_info_map[response.sid]
        else:
            response_info.state = kReceived
            response_info.response.sid = response.sid
            response_info.response.error = response.error
            if response.result:
                response_info.response.result = response.result
        if type(response.result) == dict:
            # Successful responses are announced to all listeners.
            self.unnotified_cmd_response_listeners = copy.copy(self.listeners)
            self.unnotified_cmd_response_info = response_info
            status = self._EnsureListenersNotifiedOfCommandResponse()
            self.unnotified_cmd_response_info = ResponseInfo()
            if status.IsError():
                return status
        return Status(kOk)

    def _EnsureListenersNotifiedOfConnect(self):
        # Pop-before-call so a listener that raises an error is not
        # re-notified on the next pass.
        while len(self.unnotified_connect_listeners):
            listener = self.unnotified_connect_listeners[0]
            del self.unnotified_connect_listeners[0]
            status = listener.OnConnected(self)
            if status.IsError():
                return status
        return Status(kOk)

    def _EnsureListenersNotifiedOfEvent(self):
        # Same pop-before-call pattern as above, for event notifications.
        while len(self.unnotified_event_listeners):
            listener = self.unnotified_event_listeners[0]
            del self.unnotified_event_listeners[0]
            status = listener.OnEvent(self, self.unnotified_event.method, self.unnotified_event.params)
            if status.IsError():
                return status
        return Status(kOk)

    def _EnsureListenersNotifiedOfCommandResponse(self):
        # Same pop-before-call pattern, for command-success notifications.
        while len(self.unnotified_cmd_response_listeners):
            listener = self.unnotified_cmd_response_listeners[0]
            del self.unnotified_cmd_response_listeners[0]
            status = listener.OnCommandSuccess(self, self.unnotified_cmd_response_info.method)
            if status.IsError():
                return status
        return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/devtools_client_impl.py",
"copies": "1",
"size": "14578",
"license": "bsd-3-clause",
"hash": 5126628076973718000,
"line_mean": 35.7204030227,
"line_max": 104,
"alpha_frac": 0.6933735766,
"autogenerated": false,
"ratio": 3.8332895082829346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5026663084882934,
"avg_score": null,
"num_lines": null
} |
__all__ = ["DevToolsHttpClient", \
"WebViewsInfo", \
"WebViewInfo"]
import urllib2
import time
import yaml
from version import GetMinimumSupportedXwalkVersion
from devtools_client_impl import DevToolsClientImpl
from web_view_impl import WebViewImpl
from status import *
from base.log import VLOG
from base.bind import Bind
def _FakeCloseFrontends():
    """No-op frontend closer handed to DevToolsClientImpl for clients whose
    frontends are closed elsewhere; always reports success."""
    return Status(kOk)
def _ParseWebViewsInfo(data, views_info):
    """Convert the DevTools /json target list into WebViewInfo entries.

    On success the parsed list replaces views_info.views_info and kOk is
    returned; on any malformed entry an error Status is returned and
    views_info is left untouched.
    """
    if type(data) != list:
        return Status(kUnknownError, "DevTools did not return list")
    type_by_name = {
        "app": WebViewInfo.kApp,
        "background_page": WebViewInfo.kBackgroundPage,
        "page": WebViewInfo.kPage,
        "worker": WebViewInfo.kWorker,
        "other": WebViewInfo.kOther,
    }
    parsed = []
    for item in data:
        if type(item) != dict:
            return Status(kUnknownError, "DevTools contains non-dictionary item")
        sid = item.get("id")
        if type(sid) != str:
            return Status(kUnknownError, "DevTools did not include id")
        type_as_string = item.get("type")
        if type(type_as_string) != str:
            return Status(kUnknownError, "DevTools did not include type")
        url = item.get("url")
        if type(url) != str:
            return Status(kUnknownError, "DevTools did not include url")
        debugger_url = item.get("webSocketDebuggerUrl")
        if type(debugger_url) != str:
            return Status(kUnknownError, "DevTools did not include debug url")
        typer = type_by_name.get(type_as_string)
        if typer is None:
            return Status(kUnknownError, "DevTools returned unknown type:" + type_as_string)
        parsed.append(WebViewInfo(sid, debugger_url, url, typer))
    views_info.views_info = parsed
    return Status(kOk)
# return status and xwalk-version
def _ParseVersionInfo(data):
    """Extract the browser version string from the DevTools /json/version
    payload. Returns a (Status, version-string) pair; the string is empty
    on error."""
    if type(data) != dict:
        return Status(kUnknownError, "version info not a dictionary"), ""
    version = data.get("Browser")
    if type(version) != str:
        msg = ("Xwalk version must be >= " +
               GetMinimumSupportedXwalkVersion() +
               "version info doesn't include string 'Browser'")
        return Status(kUnknownError, msg), ""
    return Status(kOk), version
class WebViewInfo(object):
    """Metadata for one DevTools debug target: id, websocket debugger URL,
    page URL and target type."""

    # Target-type constants matching the "type" field of /json entries.
    kApp = 0
    kBackgroundPage = 1
    kPage = 3
    kWorker = 4
    kOther = 5

    def __init__(self, sid, debugger_url, url, typer):
        self.sid = sid
        self.debugger_url = debugger_url
        self.url = url
        self.typer = typer

    def Update(self, other):
        """Copy every field from another WebViewInfo into this one."""
        for name in ('sid', 'debugger_url', 'url', 'typer'):
            setattr(self, name, getattr(other, name))

    def IsFrontend(self):
        """True when this target is itself a DevTools frontend page."""
        return self.url.startswith("chrome-devtools://")
class WebViewsInfo(object):
    """Ordered collection of WebViewInfo entries with id-based lookup."""

    def __init__(self, info=None):
        # Fix: fresh list per instance. The old `info=[]` default was one
        # shared list, so every WebViewsInfo() constructed without arguments
        # aliased (and mutated) the same underlying list.
        self.views_info = [] if info is None else info

    def Update(self, other):
        """Adopt the entry list of another WebViewsInfo."""
        self.views_info = other.views_info

    def Get(self, index):
        """Return the entry at position |index|."""
        return self.views_info[index]

    def GetSize(self):
        """Return the number of entries."""
        return len(self.views_info)

    def GetForId(self, sid):
        """Return the entry whose sid matches, or None when absent."""
        for info in self.views_info:
            if info.sid == sid:
                return info
        return None
class DevToolsHttpClient(object):
    """HTTP client for a browser's DevTools discovery endpoints (/json,
    /json/version, /json/close, /json/activate) and factory for per-target
    websocket DevToolsClientImpl instances."""

    def __init__(self, address, socket_factory):
        # socket_factory creates the websocket connections used by the
        # DevToolsClientImpl instances this client hands out.
        self.socket_factory = socket_factory
        self.server_url = "http://" + address
        self.web_socket_url_prefix = "ws://" + address + "/devtools/page/"
        # Filled in by Init() once the browser answers /json/version.
        self.version = ""
        self.build_no = ""

    def Update(self, other):
        """Copy every field from another DevToolsHttpClient into this one."""
        self.socket_factory = other.socket_factory
        self.server_url = other.server_url
        self.web_socket_url_prefix = other.web_socket_url_prefix
        self.version = other.version
        self.build_no = other.build_no

    def Init(self, timeout):
        """Poll /json/version until the browser answers or |timeout| seconds
        elapse, then derive self.version / self.build_no from the reported
        string. Returns a Status."""
        deadline = time.time() + timeout
        #VLOG(0, "DevTools server address is " + self.server_url)
        while True:
            status, devtools_version = self.GetVersion()
            if status.IsOk():
                break
            if status.Code() != kXwalkNotReachable or time.time() > deadline:
                return status
            time.sleep(0.05)
        kToTBuildNo = '9999'
        if not len(devtools_version):
            # Content Shell has an empty product version and a fake user agent.
            # There's no way to detect the actual version, so assume it is tip of tree.
            self.version = "content shell"
            self.build_no = kToTBuildNo
            return Status(kOk)
        if devtools_version.find("Version/") == 0:
            self.version = "webview"
            self.build_no = kToTBuildNo
            return Status(kOk)
        # Expected form: "Chrome/<major>.<minor>.<build>.<patch>".
        prefix = "Chrome/"
        if devtools_version.find(prefix) != 0:
            return Status(kUnknownError, "unrecognized Xwalk version: " + devtools_version)
        stripped_version = devtools_version[len(prefix):]
        version_parts = stripped_version.split('.')
        if len(version_parts) != 4:
            return Status(kUnknownError, "unrecognized Xwalk version: " + devtools_version)
        self.version = stripped_version
        try:
            self.build_no = str(version_parts[2])
            #VLOG(0, "we get build no: " + self.build_no)
        except:
            return Status(kUnknownError, "unrecognized Xwalk version: " + devtools_version)
        return Status(kOk)

    def GetWebViewsInfo(self, views_info):
        """Fetch /json and parse it into |views_info|. Returns a Status."""
        re, data = self.FetchUrlAndLog(self.server_url + "/json")
        if not re:
            return Status(kXwalkNotReachable)
        return _ParseWebViewsInfo(data, views_info)

    def CreateClient(self, sid):
        """Create a websocket DevTools client for the target id |sid|."""
        return DevToolsClientImpl(self.socket_factory,
                                  self.web_socket_url_prefix + sid,
                                  sid,
                                  Bind(self.CloseFrontends, [sid]))

    def CloseWebView(self, sid):
        """Close the target |sid| via /json/close and wait until it is gone.
        A failed fetch is treated as already-closed (closing the last view
        quits the browser)."""
        re, data = self.FetchUrlAndLog(self.server_url + "/json/close/" + sid)
        if not re:
            return Status(kOk)
        # Closing the last web view leads xwalk to quit.
        # Wait for the target window to be completely closed.
        deadline = time.time() + 20
        while time.time() < deadline:
            views_info = WebViewsInfo()
            status = self.GetWebViewsInfo(views_info)
            if status.Code() == kXwalkNotReachable:
                return Status(kOk)
            if status.IsError():
                return status
            if not views_info.GetForId(sid):
                return Status(kOk)
            time.sleep(0.050)
        return Status(kUnknownError, "failed to close window in 20 seconds")

    def ActivateWebView(self, sid):
        """Bring the target |sid| to the foreground via /json/activate."""
        re, data = self.FetchUrlAndLog(self.server_url + "/json/activate/" + sid)
        if not re:
            return Status(kUnknownError, "cannot activate web view")
        return Status(kOk)

    # return status and verison
    def GetVersion(self):
        """Fetch /json/version. Returns (Status, version-string)."""
        re, data = self.FetchUrlAndLog(self.server_url + "/json/version")
        if not re:
            return Status(kXwalkNotReachable), ""
        return _ParseVersionInfo(data)

    def CloseFrontends(self, for_client_id):
        """Close DevTools frontend windows, then wait until no UI debugger
        is attached to the target |for_client_id|."""
        views_info = WebViewsInfo()
        status = self.GetWebViewsInfo(views_info)
        if status.IsError():
            return status
        # Close frontends. Usually frontends are docked in the same page, although
        # some may be in tabs (undocked, xwalk://inspect, the DevTools
        # discovery page, etc.). Tabs can be closed via the DevTools HTTP close
        # URL, but docked frontends can only be closed, by design, by connecting
        # to them and clicking the close button. Close the tab frontends first
        # in case one of them is debugging a docked frontend, which would prevent
        # the code from being able to connect to the docked one.
        tab_frontend_ids = []
        docked_frontend_ids = []
        for view_info in views_info.views_info:
            if view_info.IsFrontend():
                if view_info.typer == WebViewInfo.kPage:
                    tab_frontend_ids.append(view_info.sid)
                elif view_info.typer == WebViewInfo.kOther:
                    docked_frontend_ids.append(view_info.sid)
                else:
                    return Status(kUnknownError, "unknown type of DevTools frontend")
        for i in tab_frontend_ids:
            status = self.CloseWebView(i)
            if status.IsError():
                return status
        for i in docked_frontend_ids:
            # Fix: the module-level no-op closer is named _FakeCloseFrontends;
            # the bare name FakeCloseFrontends raised NameError here.
            client = DevToolsClientImpl(self.socket_factory,
                                        self.web_socket_url_prefix + i,
                                        i,
                                        Bind(_FakeCloseFrontends))
            web_view = WebViewImpl(i, self.build_no, client)
            status = web_view.ConnectIfNecessary()
            # Ignore disconnected error, because the debugger might have closed when
            # its container page was closed above.
            if status.IsError() and status.Code() != kDisconnected:
                return status
            status, result = web_view.EvaluateScript("", "document.querySelector('*[id^=\"close-button-\"]').click();")
            # Ignore disconnected error, because it may be closed already.
            if status.IsError() and status.Code() != kDisconnected:
                return status
        # Wait until DevTools UI disconnects from the given web view.
        deadline = time.time() + 20
        while time.time() < deadline:
            # Fix: refresh the full target list into |views_info|; the old
            # code passed the stale per-entry loop variable |view_info| here.
            status = self.GetWebViewsInfo(views_info)
            if status.IsError():
                return status
            view_info = views_info.GetForId(for_client_id)
            if not view_info:
                return Status(kNoSuchWindow, "window was already closed")
            if len(view_info.debugger_url):
                # A non-empty debugger URL means no UI debugger is attached.
                return Status(kOk)
            time.sleep(0.050)
        return Status(kUnknownError, "failed to close UI debuggers")

    # return bool and response<list>
    def FetchUrlAndLog(self, url):
        """GET |url| and parse the body. Returns (ok, parsed-data); ok is
        False when the request or parsing failed."""
        #VLOG(1, "devtools request: " + url)
        try:
            response = urllib2.urlopen(url)
            # NOTE(review): the body is JSON; yaml.load parses it but is
            # unsafe on untrusted input -- consider json.load.
            response = yaml.load(response)
        except:
            #VLOG(1, "devtools request failed")
            return False, []
        #VLOG(1, "devtools response: " + str(response))
        return True, response
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/devtools_http_client.py",
"copies": "1",
"size": "9627",
"license": "bsd-3-clause",
"hash": -8346731058455586000,
"line_mean": 33.6294964029,
"line_max": 113,
"alpha_frac": 0.6502544926,
"autogenerated": false,
"ratio": 3.600224382946896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47504788755468963,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DictExtractionTransformer', 'DictListExtractionTransformer', 'FunctionTransformer', 'IdentityTransformer']
from . import PureTransformer
class DictExtractionTransformer(PureTransformer):
    """Extract a single key from each dictionary object.

    Missing keys fall back to ``default``.
    """

    def __init__(self, key=None, default=None, **kwargs):
        # Outputs are scalars, so leave nparray off unless the caller asks.
        kwargs.setdefault('nparray', False)
        super(DictExtractionTransformer, self).__init__(**kwargs)
        self.key = key
        self.default = default

    def transform_one(self, d, **kwargs):
        return d.get(self.key, self.default)
class DictListExtractionTransformer(PureTransformer):
    """Extract a given key from every dictionary in a list.

    Missing keys fall back to ``default``.
    """

    def __init__(self, key=None, default=None, **kwargs):
        super(DictListExtractionTransformer, self).__init__(**kwargs)
        self.key = key
        self.default = default

    def transform_one(self, L, **kwargs):
        return [item.get(self.key, self.default) for item in L]
class FunctionTransformer(PureTransformer):
    """Apply an arbitrary callable to each input object."""

    def __init__(self, func, **kwargs):
        super(FunctionTransformer, self).__init__(**kwargs)
        self.func = func

    def transform_one(self, x, **kwargs):
        return self.func(x)
class IdentityTransformer(PureTransformer):
    """Pass every input through unchanged."""

    def transform_one(self, x, **kwargs):
        return x
| {
"repo_name": "skylander86/ycml",
"path": "ycml/transformers/misc.py",
"copies": "1",
"size": "1385",
"license": "apache-2.0",
"hash": -7819589387013101000,
"line_mean": 27.2653061224,
"line_max": 118,
"alpha_frac": 0.6685920578,
"autogenerated": false,
"ratio": 3.9571428571428573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5125734914942858,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DirectEntryScroll']
from panda3d.core import *
from . import DirectGuiGlobals as DGG
from .DirectScrolledFrame import *
from .DirectFrame import *
from .DirectEntry import *
class DirectEntryScroll(DirectFrame):
    """A clipping viewport around a DirectEntry that scrolls horizontally
    so the text cursor stays visible while typing in a long entry."""
    def __init__(self, entry, parent = None, **kw):
        # Widget option defaults; changing 'clipSize' re-applies the clip frame.
        optiondefs = (
            ('pgFunc', PGVirtualFrame, None),
            ('relief', None, None),
            ('clipSize', (-1, 1, -1, 1), self.setClipSize),
            )
        self.defineoptions(kw, optiondefs)
        DirectFrame.__init__(self, parent, **kw)
        self.canvas = None
        self.visXMin = 0.0
        self.visXMax = 0.0
        self.clipXMin = 0.0
        self.clipXMax = 0.0
        self.initialiseoptions(DirectEntryScroll)
        # don't set a scale on the entry
        # instead make it the correct size, use something like:
        # text_scale = 0.035,
        # frameSize = (-0.006, 3.2, -0.015, 0.036),
        # if you need to scale the entry scale it's parent instead
        self.entry = entry
        # Reparent the entry onto this frame's scrollable canvas node.
        self.canvas = NodePath(self.guiItem.getCanvasNode())
        self.entry.reparentTo(self.canvas)
        self.canvas.setPos(0,0,0)
        # Re-scroll whenever the entry reports a cursor move.
        self.entry.bind(DGG.CURSORMOVE,self.cursorMove)
        # Never cull the canvas; its on-screen region changes as it scrolls.
        self.canvas.node().setBounds(OmniBoundingVolume())
        self.canvas.node().setFinal(1)
        self.resetCanvas()
    def cursorMove(self, cursorX, cursorY):
        """CURSORMOVE handler: recenter when the cursor drifts out of the
        middle half of the clip region (the bound event args are ignored)."""
        cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
        canvasX = self.canvas.getX()
        # Visible window expressed in canvas coordinates.
        visXMin = self.clipXMin - canvasX
        visXMax = self.clipXMax - canvasX
        visXCenter = (visXMin + visXMax) * 0.5
        distanceToCenter = visXCenter - cursorX
        clipExtent = self.clipXMax - self.clipXMin
        entryExtent = self.entry['text_scale'][0] * self.entry['width']
        entryWiggle = entryExtent - clipExtent  # NOTE(review): unused in this method
        if abs(distanceToCenter) > (clipExtent * 0.5):
            self.moveToCenterCursor()
    def moveToCenterCursor(self):
        """Slide the canvas so the text cursor sits at the clip center,
        clamped so the entry never scrolls past either end."""
        cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
        canvasX = self.canvas.getX()
        visXMin = self.clipXMin - canvasX
        visXMax = self.clipXMax - canvasX
        visXCenter = (visXMin + visXMax) * 0.5
        distanceToCenter = visXCenter - cursorX
        newX = canvasX + distanceToCenter
        clipExtent = self.clipXMax - self.clipXMin
        entryExtent = self.entry['text_scale'][0] * self.entry['width']
        # How far the entry can scroll before its far end hits the clip edge.
        entryWiggle = entryExtent - clipExtent
        if self.entry.guiItem.getCursorPosition() <= 0: #deals with the cursor jump bug
            newX = 0.0
        elif newX > 0.0:
            newX = 0.0
        elif newX < (-entryWiggle):
            newX = -entryWiggle
        #print("CursorX %s CanvasX %s VisCenter %s Distance %s NewX %s Wiggle %s" % (cursorX, canvasX, visXCenter, distanceToCenter, newX, entryWiggle))
        self.canvas.setX(newX)
    def destroy(self):
        """Destroy canvas children, then the wrapped entry, then this frame."""
        # Destroy children of the canvas
        for child in self.canvas.getChildren():
            # guiDict is presumably the DirectGui widget registry inherited
            # from DirectGuiBase -- TODO confirm.
            childGui = self.guiDict.get(child.getName())
            if childGui:
                childGui.destroy()
            else:
                # Node names may be decorated ("prefix-id"); try the trailing id.
                parts = child.getName().split('-')
                simpleChildGui = self.guiDict.get(parts[-1])
                if simpleChildGui:
                    simpleChildGui.destroy()
        self.entry.destroy()
        self.entry = None
        DirectFrame.destroy(self)
    def getCanvas(self):
        """Return the NodePath of the scrolling canvas."""
        return self.canvas
    def setClipSize(self):
        """Option handler: apply 'clipSize' to the PG clip frame and cache
        the horizontal extents used by the scrolling math."""
        self.guiItem.setClipFrame(self['clipSize'])
        self.clipXMin = self['clipSize'][0]
        self.clipXMax = self['clipSize'][1]
        self.visXMin = self.clipXMin
        self.visXMax = self.clipXMax
        # Option handlers can fire before __init__ has created the canvas.
        if self.canvas:
            self.resetCanvas()
    def resetCanvas(self):
        """Scroll the canvas back to the origin."""
        self.canvas.setPos(0,0,0)
| {
"repo_name": "chandler14362/panda3d",
"path": "direct/src/gui/DirectEntryScroll.py",
"copies": "10",
"size": "3883",
"license": "bsd-3-clause",
"hash": -2633922198941115000,
"line_mean": 32.188034188,
"line_max": 152,
"alpha_frac": 0.5928405872,
"autogenerated": false,
"ratio": 3.5854108956602033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9178251482860204,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DirectEntryScroll']
from panda3d.core import *
import DirectGuiGlobals as DGG
from DirectScrolledFrame import *
from DirectFrame import *
from DirectEntry import *
class DirectEntryScroll(DirectFrame):
    """A clipping viewport around a DirectEntry that scrolls horizontally
    so the text cursor stays visible while typing in a long entry."""
    def __init__(self, entry, parent = None, **kw):
        # Widget option defaults; changing 'clipSize' re-applies the clip frame.
        optiondefs = (
            ('pgFunc', PGVirtualFrame, None),
            ('relief', None, None),
            ('clipSize', (-1, 1, -1, 1), self.setClipSize),
            )
        self.defineoptions(kw, optiondefs)
        DirectFrame.__init__(self, parent, **kw)
        self.canvas = None
        self.visXMin = 0.0
        self.visXMax = 0.0
        self.clipXMin = 0.0
        self.clipXMax = 0.0
        self.initialiseoptions(DirectEntryScroll)
        # don't set a scale on the entry
        # instead make it the correct size, use something like:
        # text_scale = 0.035,
        # frameSize = (-0.006, 3.2, -0.015, 0.036),
        # if you need to scale the entry scale it's parent instead
        self.entry = entry
        # Reparent the entry onto this frame's scrollable canvas node.
        self.canvas = NodePath(self.guiItem.getCanvasNode())
        self.entry.reparentTo(self.canvas)
        self.canvas.setPos(0,0,0)
        # Re-scroll whenever the entry reports a cursor move.
        self.entry.bind(DGG.CURSORMOVE,self.cursorMove)
        # Never cull the canvas; its on-screen region changes as it scrolls.
        self.canvas.node().setBounds(OmniBoundingVolume())
        self.canvas.node().setFinal(1)
        self.resetCanvas()
    def cursorMove(self, cursorX, cursorY):
        """CURSORMOVE handler: recenter when the cursor drifts out of the
        middle half of the clip region (the bound event args are ignored)."""
        cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
        canvasX = self.canvas.getX()
        # Visible window expressed in canvas coordinates.
        visXMin = self.clipXMin - canvasX
        visXMax = self.clipXMax - canvasX
        visXCenter = (visXMin + visXMax) * 0.5
        distanceToCenter = visXCenter - cursorX
        clipExtent = self.clipXMax - self.clipXMin
        entryExtent = self.entry['text_scale'][0] * self.entry['width']
        entryWiggle = entryExtent - clipExtent  # NOTE(review): unused in this method
        if abs(distanceToCenter) > (clipExtent * 0.5):
            self.moveToCenterCursor()
    def moveToCenterCursor(self):
        """Slide the canvas so the text cursor sits at the clip center,
        clamped so the entry never scrolls past either end."""
        cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
        canvasX = self.canvas.getX()
        visXMin = self.clipXMin - canvasX
        visXMax = self.clipXMax - canvasX
        visXCenter = (visXMin + visXMax) * 0.5
        distanceToCenter = visXCenter - cursorX
        newX = canvasX + distanceToCenter
        clipExtent = self.clipXMax - self.clipXMin
        entryExtent = self.entry['text_scale'][0] * self.entry['width']
        # How far the entry can scroll before its far end hits the clip edge.
        entryWiggle = entryExtent - clipExtent
        if self.entry.guiItem.getCursorPosition() <= 0: #deals with the cursor jump bug
            newX = 0.0
        elif newX > 0.0:
            newX = 0.0
        elif newX < (-entryWiggle):
            newX = -entryWiggle
        #print("CursorX %s CanvasX %s VisCenter %s Distance %s NewX %s Wiggle %s" % (cursorX, canvasX, visXCenter, distanceToCenter, newX, entryWiggle))
        self.canvas.setX(newX)
    def destroy(self):
        """Destroy canvas children, then the wrapped entry, then this frame."""
        # Destroy children of the canvas
        for child in self.canvas.getChildren():
            # guiDict is presumably the DirectGui widget registry inherited
            # from DirectGuiBase -- TODO confirm.
            childGui = self.guiDict.get(child.getName())
            if childGui:
                childGui.destroy()
            else:
                # Node names may be decorated ("prefix-id"); try the trailing id.
                parts = child.getName().split('-')
                simpleChildGui = self.guiDict.get(parts[-1])
                if simpleChildGui:
                    simpleChildGui.destroy()
        self.entry.destroy()
        self.entry = None
        DirectFrame.destroy(self)
    def getCanvas(self):
        """Return the NodePath of the scrolling canvas."""
        return self.canvas
    def setClipSize(self):
        """Option handler: apply 'clipSize' to the PG clip frame and cache
        the horizontal extents used by the scrolling math."""
        self.guiItem.setClipFrame(self['clipSize'])
        self.clipXMin = self['clipSize'][0]
        self.clipXMax = self['clipSize'][1]
        self.visXMin = self.clipXMin
        self.visXMax = self.clipXMax
        # Option handlers can fire before __init__ has created the canvas.
        if self.canvas:
            self.resetCanvas()
    def resetCanvas(self):
        """Scroll the canvas back to the origin."""
        self.canvas.setPos(0,0,0)
| {
"repo_name": "sctigercat1/panda3d",
"path": "direct/src/gui/DirectEntryScroll.py",
"copies": "8",
"size": "4043",
"license": "bsd-3-clause",
"hash": 653566241581268900,
"line_mean": 33.5555555556,
"line_max": 152,
"alpha_frac": 0.5683898095,
"autogenerated": false,
"ratio": 3.7331486611265006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.83015384706265,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DirectEntryScroll']
from pandac.PandaModules import *
import DirectGuiGlobals as DGG
from DirectScrolledFrame import *
from DirectFrame import *
from DirectEntry import *
class DirectEntryScroll(DirectFrame):
    """A clipping viewport around a DirectEntry that scrolls horizontally
    so the text cursor stays visible while typing in a long entry."""
    def __init__(self, entry, parent = None, **kw):
        # Widget option defaults; changing 'clipSize' re-applies the clip frame.
        optiondefs = (
            ('pgFunc', PGVirtualFrame, None),
            ('relief', None, None),
            ('clipSize', (-1, 1, -1, 1), self.setClipSize),
            )
        self.defineoptions(kw, optiondefs)
        DirectFrame.__init__(self, parent, **kw)
        self.canvas = None
        self.visXMin = 0.0
        self.visXMax = 0.0
        self.clipXMin = 0.0
        self.clipXMax = 0.0
        self.initialiseoptions(DirectEntryScroll)
        # don't set a scale on the entry
        # instead make it the correct size, use something like:
        # text_scale = 0.035,
        # frameSize = (-0.006, 3.2, -0.015, 0.036),
        # if you need to scale the entry scale it's parent instead
        self.entry = entry
        # Reparent the entry onto this frame's scrollable canvas node.
        self.canvas = NodePath(self.guiItem.getCanvasNode())
        self.entry.reparentTo(self.canvas)
        self.canvas.setPos(0,0,0)
        # Re-scroll whenever the entry reports a cursor move.
        self.entry.bind(DGG.CURSORMOVE,self.cursorMove)
        # Never cull the canvas; its on-screen region changes as it scrolls.
        self.canvas.node().setBounds(OmniBoundingVolume())
        self.canvas.node().setFinal(1)
        self.resetCanvas()
    def cursorMove(self, cursorX, cursorY):
        """CURSORMOVE handler: recenter when the cursor drifts out of the
        middle half of the clip region (the bound event args are ignored)."""
        cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
        canvasX = self.canvas.getX()
        # Visible window expressed in canvas coordinates.
        visXMin = self.clipXMin - canvasX
        visXMax = self.clipXMax - canvasX
        visXCenter = (visXMin + visXMax) * 0.5
        distanceToCenter = visXCenter - cursorX
        clipExtent = self.clipXMax - self.clipXMin
        entryExtent = self.entry['text_scale'][0] * self.entry['width']
        entryWiggle = entryExtent - clipExtent  # NOTE(review): unused in this method
        if abs(distanceToCenter) > (clipExtent * 0.5):
            self.moveToCenterCursor()
    def moveToCenterCursor(self):
        """Slide the canvas so the text cursor sits at the clip center,
        clamped so the entry never scrolls past either end."""
        cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
        canvasX = self.canvas.getX()
        visXMin = self.clipXMin - canvasX
        visXMax = self.clipXMax - canvasX
        visXCenter = (visXMin + visXMax) * 0.5
        distanceToCenter = visXCenter - cursorX
        newX = canvasX + distanceToCenter
        clipExtent = self.clipXMax - self.clipXMin
        entryExtent = self.entry['text_scale'][0] * self.entry['width']
        # How far the entry can scroll before its far end hits the clip edge.
        entryWiggle = entryExtent - clipExtent
        if self.entry.guiItem.getCursorPosition() <= 0: #deals with the cursor jump bug
            newX = 0.0
        elif newX > 0.0:
            newX = 0.0
        elif newX < (-entryWiggle):
            newX = -entryWiggle
        #print("CursorX %s CanvasX %s VisCenter %s Distance %s NewX %s Wiggle %s" % (cursorX, canvasX, visXCenter, distanceToCenter, newX, entryWiggle))
        self.canvas.setX(newX)
    def destroy(self):
        """Destroy canvas children, then the wrapped entry, then this frame."""
        # Destroy children of the canvas
        for child in self.canvas.getChildren():
            # guiDict is presumably the DirectGui widget registry inherited
            # from DirectGuiBase -- TODO confirm.
            childGui = self.guiDict.get(child.getName())
            if childGui:
                childGui.destroy()
            else:
                # Node names may be decorated ("prefix-id"); try the trailing id.
                parts = child.getName().split('-')
                simpleChildGui = self.guiDict.get(parts[-1])
                if simpleChildGui:
                    simpleChildGui.destroy()
        self.entry.destroy()
        self.entry = None
        DirectFrame.destroy(self)
    def getCanvas(self):
        """Return the NodePath of the scrolling canvas."""
        return self.canvas
    def setClipSize(self):
        """Option handler: apply 'clipSize' to the PG clip frame and cache
        the horizontal extents used by the scrolling math."""
        self.guiItem.setClipFrame(self['clipSize'])
        self.clipXMin = self['clipSize'][0]
        self.clipXMax = self['clipSize'][1]
        self.visXMin = self.clipXMin
        self.visXMax = self.clipXMax
        # Option handlers can fire before __init__ has created the canvas.
        if self.canvas:
            self.resetCanvas()
    def resetCanvas(self):
        """Scroll the canvas back to the origin."""
        self.canvas.setPos(0,0,0)
| {
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"path": "Panda3D-1.9.0/direct/gui/DirectEntryScroll.py",
"copies": "4",
"size": "4075",
"license": "mit",
"hash": -3242939860245052400,
"line_mean": 33.2521008403,
"line_max": 152,
"alpha_frac": 0.5656441718,
"autogenerated": false,
"ratio": 3.7523020257826887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.032091141201511506,
"num_lines": 119
} |
__all__ = ('discover_item',)
def discover_item(title='Link Bridge'):
    """Build the LaunchBar item that re-runs bridge discovery when chosen."""
    return {
        'title': title,
        'action': 'discover.py',
        'actionReturnsItems': True,
    }
def item_for_description(desc):
device_info = desc['device']
return dict(
action='connect.py',
actionArgument=desc['URLBase'] + '#' + device_info['serialNumber'],
title='Link to ' + device_info['friendlyName'],
icon='bridge_v1.pdf' if device_info['modelNumber'] == '929000226503'
else 'bridge_v2.pdf',
iconIsTemplate=True,
actionReturnsItems=True)
def nupnp_discover():
    """Discover Hue bridges via the meethue.com N-UPnP registry.

    Asks the cloud endpoint for bridges seen on this network, then fetches
    each bridge's local description.xml to build LaunchBar items.
    Returns [] on any HTTP failure.
    """
    import requests
    from netdisco.util import etree_to_dict
    from xml.etree import ElementTree
    items = []
    try:
        # 8s budget for the cloud call; 1s per local bridge probe.
        response = requests.get('https://www.meethue.com/api/nupnp', timeout=8)
        bridges = response.json()
        for bridge in bridges:
            url = 'http://%s/description.xml' % bridge['internalipaddress']
            xml = ElementTree.fromstring(requests.get(url, timeout=1).text)
            description = etree_to_dict(xml)['root']
            items.append(item_for_description(description))
    except requests.exceptions.RequestException:
        # NOTE(review): JSON/XML parse errors are not caught here and would
        # propagate -- confirm whether that is intended.
        return []
    return items
def ssdp_discover():
    """Discover Hue bridges on the LAN via SSDP (netdisco).

    Returns LaunchBar items for every Philips Hue root device found.
    """
    # yuck; alternative would be to mock netdis and scan SSDP directly
    import netdisco.ssdp
    # Monkeypatch netdisco's scan so its default timeout becomes 2 seconds.
    original_scan = netdisco.ssdp.scan
    def scan(st=None, timeout=2, max_entries=None):
        return original_scan(st, timeout, max_entries)
    netdisco.ssdp.scan = scan
    from netdisco.discovery import NetworkDiscovery
    discovery = NetworkDiscovery(limit_discovery=['philips_hue'])
    # again yuck, but limit_discovery doesn't work backwards
    # Drive a single SSDP scan by hand instead of discovery.scan().
    discovery.is_discovering = True
    discovery.ssdp.scan()
    discovery.is_discovering = False
    # Only root devices; docked/embedded entries share the same description.
    return [item_for_description(entry.description)
            for entry in discovery.get_entries('philips_hue')
            if entry.st == 'upnp:rootdevice']
if __name__ == '__main__':
    import os.path
    # Bootstrap the bundled virtualenv so requests/netdisco resolve.
    # NOTE(review): execfile and the print statement below are Python 2 only.
    activate_this = os.path.join(os.path.dirname(__file__),
                                 'bin/activate_this.py')
    execfile(activate_this, dict(__file__=activate_this))
    import json
    # Prefer the cloud N-UPnP registry, fall back to local SSDP scanning.
    items = nupnp_discover()
    if not items:
        items = ssdp_discover()
    if not items:
        # discover_item returns a single dict, not a list -- presumably
        # LaunchBar accepts either shape; verify against the other actions.
        items = discover_item('No bridges found. Rescan for bridges?')
    print json.dumps(items)
| {
"repo_name": "nriley/LBHue",
"path": "Hue.lbaction/Contents/Scripts/discover.py",
"copies": "1",
"size": "2378",
"license": "apache-2.0",
"hash": -139266331606127890,
"line_mean": 32.0277777778,
"line_max": 79,
"alpha_frac": 0.6446593776,
"autogenerated": false,
"ratio": 3.7448818897637794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.987526408685396,
"avg_score": 0.0028554361019638794,
"num_lines": 72
} |
__all__ = ['DiscoverProtocolCommand']
import time
import random
from print_colors import PrintColors
from contact import Contact
from protocol_command import ProtocolCommand
class DiscoverProtocolCommand(ProtocolCommand):
    """Peer-discovery protocol command.

    Periodically asks a random known contact for its routing table and
    merges the returned contacts into ours.  The request/response payloads
    carry this node's id and listen address; responses additionally carry a
    snapshot of the responder's contact list.
    """

    def start(self):
        """Kick off the periodic discovery loop."""
        self.req()

    def stop(self):
        raise NotImplementedError

    def req(self):
        """Send a discovery request to one random contact, then reschedule."""
        c = self.node.rt.contacts.random(without_id=self.node.id)
        if not c or c.id is None:
            # No usable peer yet; retry in 5-10 seconds.
            self.node.loop.call_later(5.0 + random.random() * 5.0, self.req)
            return
        args = ()
        kwargs = {
            'id': self.node.id,
            'local_host': self.node.listen_host,
            'local_port': self.node.listen_port,
        }
        res = (args, kwargs)
        # build message
        message_data = self.node.build_message(
            self.protocol_major_version,
            self.protocol_minor_version,
            self.PROTOCOL_REQ,
            self.protocol_command_code,
            res,
        )
        # force del
        del args
        del kwargs
        del res
        # send message
        self.node.send_message(message_data, c.remote_host, c.remote_port)
        # schedule next discover (0-10s jitter)
        self.node.loop.call_later(0.0 + random.random() * 10.0, self.req)

    def _update_or_add_contact(self, node_id, local_host, local_port,
                               remote_host, remote_port, bootstrap,
                               dest, touch, label=None):
        """Refresh or register one contact, searching by id then by address.

        Looks through the routing table's `contacts`, `add_contacts` and
        `remove_contacts` lists; a match found in a pending list is moved
        into `dest`, and an unknown contact is created in `dest`.

        dest  -- collection a pending/new contact ends up in
                 (rt.contacts or rt.add_contacts).
        touch -- when True, update `last_seen` on contacts found in a list.
        label -- when set, log promoted/created contacts with this tag.
        """
        rt = self.node.rt
        # Already-active contact: just refresh its identity (and last_seen).
        c = rt.contacts.get(node_id) or rt.contacts.get((remote_host, remote_port))
        if c:
            c.id = node_id
            if touch:
                c.last_seen = time.time()
            return c
        # Pending-add contact: move it into `dest` if that differs.
        c = rt.add_contacts.get(node_id) or rt.add_contacts.get((remote_host, remote_port))
        if c:
            if dest is not rt.add_contacts:
                rt.add_contacts.remove(c)
                dest.add(c)
            c.id = node_id
            if touch:
                c.last_seen = time.time()
            if label:
                print(PrintColors.GREEN + 'new contact [' + label + ']:', self.node, c, PrintColors.END)
            return c
        # Pending-removal contact: resurrect it into `dest`.
        c = rt.remove_contacts.get(node_id) or rt.remove_contacts.get((remote_host, remote_port))
        if c:
            rt.remove_contacts.remove(c)
            dest.add(c)
            c.id = node_id
            if touch:
                c.last_seen = time.time()
            if label:
                print(PrintColors.GREEN + 'new contact [' + label + ']:', self.node, c, PrintColors.END)
            return c
        # Unknown contact: create it in `dest`, marked as just seen.
        c = Contact(
            id=node_id,
            local_host=local_host,
            local_port=local_port,
            remote_host=remote_host,
            remote_port=remote_port,
            bootstrap=bootstrap,
        )
        c.last_seen = time.time()
        dest.add(c)
        if label:
            print(PrintColors.GREEN + 'new contact [' + label + ']:', self.node, c, PrintColors.END)
        return c

    def on_req(self, remote_host, remote_port, *args, **kwargs):
        """Handle an incoming discovery request: the sender just talked to
        us, so record it as an active contact and answer with our list."""
        self._update_or_add_contact(
            kwargs['id'],
            kwargs['local_host'],
            kwargs['local_port'],
            remote_host,
            remote_port,
            kwargs.get('bootstrap', False),
            dest=self.node.rt.contacts,
            touch=True,
            label='DISCOVERY REQ',
        )
        # forward to res_discover_nodes
        self.res(remote_host, remote_port, *args, **kwargs)

    def res(self, remote_host, remote_port, *args, **kwargs):
        """Send our id, listen address and full contact list back."""
        contacts = [c.__getstate__() for c in self.node.rt.contacts]
        res = {
            'id': self.node.id,
            'local_host': self.node.listen_host,
            'local_port': self.node.listen_port,
            'contacts': contacts,
        }
        # build message
        message_data = self.node.build_message(
            self.protocol_major_version,
            self.protocol_minor_version,
            self.PROTOCOL_RES,
            self.protocol_command_code,
            res,
        )
        # force del
        del contacts
        del res
        # send message
        self.node.send_message(message_data, remote_host, remote_port)

    def on_res(self, remote_host, remote_port, res):
        """Handle a discovery response: record the responder as active and
        queue every contact it reported for later addition."""
        self._update_or_add_contact(
            res['id'],
            res['local_host'],
            res['local_port'],
            remote_host,
            remote_port,
            res.get('bootstrap', False),
            dest=self.node.rt.contacts,
            touch=True,
            label='DISCOVERY ON RES',
        )
        # Merge the responder's reported contacts.  These are only queued
        # (add_contacts) and existing entries are not touched, since we have
        # not actually talked to them ourselves yet.
        for cd in res['contacts']:
            self._update_or_add_contact(
                cd['id'],
                cd['local_host'],
                cd['local_port'],
                cd['remote_host'],
                cd['remote_port'],
                cd.get('bootstrap', False),
                dest=self.node.rt.add_contacts,
                touch=False,
            )
| {
"repo_name": "mtasic85/routingtable",
"path": "discover_protocol_command.py",
"copies": "1",
"size": "11809",
"license": "mit",
"hash": -1829746817748145000,
"line_mean": 38.8952702703,
"line_max": 123,
"alpha_frac": 0.4192564993,
"autogenerated": false,
"ratio": 4.647382920110193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5566639419410193,
"avg_score": null,
"num_lines": null
} |
__all__ = ['distributeMUSICBndryPart']
import struct
import numpy as np
_header_fmt = '6i6dddii6Iiiddddii6Ii60s'
_small = 1024*256 #256KB
def distributeMUSICBndryPart(fname_in, fname_out=None, long_ids=False, \
        coarse_type=5, distribute=(1,1,1,-1)):
    """Redistribute MUSIC coarse boundary particles across particle types.

    Reads a Gadget-format IC file produced by MUSIC in which all coarse
    (boundary) particles sit in a single particle type (`coarse_type`),
    splits them by refinement level (detected from jumps in the particle
    mass block) and rewrites the header/mass block so the levels are spread
    over types 2-5 according to `distribute`.  At most one element of
    `distribute` may be -1, meaning "all remaining levels".

    NOTE(review): Python 2 code -- map/filter return lists and '/' below is
    integer division; a Python 3 port needs list(...) and '//'.
    """
    my_dist = map(int, distribute)
    if len(my_dist) != 4:
        raise ValueError('distribute must be an iterable of length 4.')
    # Locate the (at most one) -1 placeholder.
    neg = filter(lambda x: x<0, my_dist)
    if len(neg) > 1:
        raise ValueError('There can be at most one element in distribute with negative value.')
    elif len(neg) == 1:
        rest = my_dist.index(neg[0])
    else:
        rest = None
    with open(fname_in, 'rb') as f:
        # Skip the Fortran record marker, read the 256-byte Gadget header.
        f.seek(4, 1)
        header = list(struct.unpack(_header_fmt, f.read(256)))
        f.seek(4, 1)
        #
        # Only type 1 (fine) and `coarse_type` may be populated, and there
        # must be exactly one file (header[23] == num_files).
        types = range(0, 6)
        types.remove(1)
        types.remove(coarse_type)
        if any([header[i] for i in types]) or header[23] != 1:
            raise ValueError('Issues in this IC file.')
        #
        # Byte size of the pos+vel(+id) blocks: (3+3+1[+1]) * 4 bytes per
        # particle plus 6 record markers. Skip straight to the mass block.
        pos_vel_id = (header[1]+header[coarse_type])*(7+int(long_ids))*4 + 24
        f.seek(pos_vel_id, 1)
        #
        # Read the per-particle masses of the coarse particles.
        f.seek(4, 1)
        m = np.fromfile(f, np.float32, header[coarse_type])
        f.seek(4, 1)
        #
        # Level boundaries = indices where the mass value changes.
        m_sep = np.where(m[1:]-m[:-1])[0] + 1
        m_sep = [0] + m_sep.tolist() + [len(m)]
        ntype = len(m_sep) - 1
        #
        if rest is None and ntype != sum(my_dist):
            raise ValueError('Sum of distribute must equal the total number of coarse levels. Otherwise use -1 for one of the element in distribute.')
        if rest is not None:
            # Expand the -1 slot to absorb all unassigned levels.
            my_dist[rest] += ntype - sum(my_dist)
            if my_dist[rest] < 0:
                raise ValueError('Sum of distribute larger than the total number of coarse levels.')
        #
        # Fill npart (header[2:6]) and massarr (header[8:12]): a type with a
        # single level gets a constant mass in the header; multi-level types
        # keep per-particle masses (massarr 0) written out below.
        count = 0
        m_slices = []
        m_size = 0
        for i, nt in zip(range(2, 6), my_dist):
            if nt == 0:
                header[i] = 0
                header[i+6] = 0.
            else:
                header[i] = m_sep[count+nt] - m_sep[count]
                if nt > 1:
                    header[i+6] = 0.
                    m_slices.append(slice(m_sep[count], m_sep[count+nt]))
                    m_size += header[i]
                else:
                    header[i+6] = float(m[m_sep[count]])
            count += nt
        #
        # Mirror npart into npartTotal (header[18:22]).
        for i in range(2,6):
            header[i+16] = header[i]
        #
        f.seek(0, 0)
        if fname_out is None:
            fname_out = fname_in + '.out'
        with open(fname_out, 'wb') as fo:
            # Rewritten header between the original record markers.
            fo.write(f.read(4))
            fo.write(struct.pack(_header_fmt, *header))
            f.seek(256, 1)
            fo.write(f.read(4))
            #
            # Copy pos/vel/id blocks through unchanged, in 256KB chunks.
            for i in range(pos_vel_id/_small):
                fo.write(f.read(_small))
            fo.write(f.read(pos_vel_id%_small))
            #
            # Write the reduced mass block (only multi-level types need it),
            # framed by Fortran record markers.
            if m_size:
                s = np.array([m_size*4], dtype=np.int32)
                s.tofile(fo)
                for sl in m_slices:
                    m[sl].tofile(fo)
                s.tofile(fo)
#
def main():
    """Command-line entry point: parse options and run the redistribution."""
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('fname_in')
    parser.add_argument('-o', dest='fname_out')
    parser.add_argument('-l', dest='long_ids', action='store_true')
    parser.add_argument('-t', dest='coarse_type', type=int, default=5)
    parser.add_argument('-d', dest='distribute', default='1,1,1,-1')
    opts = parser.parse_args()

    distributeMUSICBndryPart(opts.fname_in,
                             fname_out=opts.fname_out,
                             long_ids=opts.long_ids,
                             coarse_type=opts.coarse_type,
                             distribute=opts.distribute.split(','))
if __name__ == "__main__":
main()
| {
"repo_name": "manodeep/yymao-helpers",
"path": "helpers/distributeMUSICBndryPart.py",
"copies": "1",
"size": "3685",
"license": "mit",
"hash": 6346009827898917000,
"line_mean": 34.4326923077,
"line_max": 150,
"alpha_frac": 0.5115332429,
"autogenerated": false,
"ratio": 3.2901785714285716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9270473885718049,
"avg_score": 0.0062475857221043676,
"num_lines": 104
} |
__all__ = ['Distribution']
import re
from distutils.core import Distribution as _Distribution
from setuptools.depends import Require
from setuptools.command.install import install
from setuptools.command.sdist import sdist
from setuptools.command.install_lib import install_lib
from distutils.errors import DistutilsOptionError, DistutilsPlatformError
from distutils.errors import DistutilsSetupError
import setuptools, pkg_resources, distutils.core, distutils.dist, distutils.cmd
import os, distutils.log
def _get_unpatched(cls):
    """Protect against re-patching the distutils if reloaded

    Also ensures that no other distutils extension monkeypatched the distutils
    first.
    """
    # Walk down the base classes past any setuptools-defined subclasses;
    # the tuple-unpacking enforces exactly one base at each step.
    while cls.__module__.startswith('setuptools'):
        cls, = cls.__bases__
    # Whatever remains must be the genuine distutils class; anything else
    # means some other package patched distutils before setuptools did.
    if not cls.__module__.startswith('distutils'):
        raise AssertionError(
            "distutils has already been patched by %r" % cls
        )
    return cls
# Unwrap any prior setuptools subclassing so we extend the pristine
# distutils Distribution class.
_Distribution = _get_unpatched(_Distribution)
# Types accepted wherever a "sequence" option value is expected.
sequence = tuple, list
def check_importable(dist, attr, value):
    """Verify that ``value`` parses as an importable 'module:attrs' string."""
    try:
        entry_point = pkg_resources.EntryPoint.parse('x=' + value)
        # Extras make no sense here; treat their presence as a parse failure.
        assert not entry_point.extras
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr, value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    # Joining a plain string yields an equal string, so the assert below
    # rejects bare strings while accepting iterables of strings.
    try:
        joined = ''.join(value)
        assert joined != value
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    assert_string_list(dist, attr, value)
    for nsp in value:
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        if '.' not in nsp:
            continue
        # A dotted namespace requires its parent namespace to be listed too.
        parent = nsp.rsplit('.', 1)[0]
        if parent not in value:
            distutils.log.warn(
                "%r is declared as a package namespace, but %r is not:"
                " please correct this in setup.py", nsp, parent
            )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for extra, reqs in list(value.items()):
            # Force the lazy parser so bad specifiers surface here.
            list(pkg_resources.parse_requirements(reqs))
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    # bool(value) == value holds exactly for True/False/0/1.
    is_boolish = bool(value) == value
    if not is_boolish:
        raise DistutilsSetupError(
            "%r must be a boolean value (got %r)" % (attr, value)
        )
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        # Force the lazy parser so invalid specifiers are caught here.
        parsed = pkg_resources.parse_requirements(value)
        list(parsed)
    except (TypeError, ValueError):
        raise DistutilsSetupError(
            "%r must be a string or list of strings "
            "containing valid project/version requirement specifiers" % (attr,)
        )
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as err:
        # Re-raise with the setup-time error type callers expect.
        raise DistutilsSetupError(err)
def check_test_suite(dist, attr, value):
    """Verify that ``test_suite`` is a dotted-name string."""
    if isinstance(value, str):
        return
    raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    ok = isinstance(value, dict)
    if ok:
        for pkg, globs in list(value.items()):
            # Keys must be package-name strings.
            if not isinstance(pkg, str):
                ok = False
                break
            # Values must at least be iterable (lists of glob patterns).
            try:
                iter(globs)
            except TypeError:
                ok = False
                break
    if ok:
        return
    raise DistutilsSetupError(
        attr + " must be a dictionary mapping package names to lists of "
        "wildcard patterns"
    )
class Distribution(_Distribution):
    """Distribution with support for features, tests, and package data
    This is an enhanced version of 'distutils.dist.Distribution' that
    effectively adds the following new optional keyword arguments to 'setup()':
    'install_requires' -- a string or sequence of strings specifying project
        versions that the distribution requires when installed, in the format
        used by 'pkg_resources.require()'.  They will be installed
        automatically when the package is installed.  If you wish to use
        packages that are not available in PyPI, or want to give your users an
        alternate download location, you can add a 'find_links' option to the
        '[easy_install]' section of your project's 'setup.cfg' file, and then
        setuptools will scan the listed web pages for links that satisfy the
        requirements.
    'extras_require' -- a dictionary mapping names of optional "extras" to the
        additional requirement(s) that using those extras incurs. For example,
        this::
            extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
        indicates that the distribution can optionally provide an extra
        capability called "reST", but it can only be used if docutils and
        reSTedit are installed.  If the user installs your package using
        EasyInstall and requests one of your extras, the corresponding
        additional requirements will be installed if needed.
    'features' -- a dictionary mapping option names to 'setuptools.Feature'
        objects.  Features are a portion of the distribution that can be
        included or excluded based on user options, inter-feature dependencies,
        and availability on the current system.  Excluded features are omitted
        from all setup commands, including source and binary distributions, so
        you can create multiple distributions from the same source tree.
        Feature names should be valid Python identifiers, except that they may
        contain the '-' (minus) sign.  Features can be included or excluded
        via the command line options '--with-X' and '--without-X', where 'X' is
        the name of the feature.  Whether a feature is included by default, and
        whether you are allowed to control this from the command line, is
        determined by the Feature object.  See the 'Feature' class for more
        information.
    'test_suite' -- the name of a test suite to run for the 'test' command.
        If the user runs 'python setup.py test', the package will be installed,
        and the named test suite will be run.  The format is the same as
        would be used on a 'unittest.py' command line.  That is, it is the
        dotted name of an object to import and call to generate a test suite.
    'package_data' -- a dictionary mapping package names to lists of filenames
        or globs to use to find data files contained in the named packages.
        If the dictionary has filenames or globs listed under '""' (the empty
        string), those names will be searched for in every package, in addition
        to any names for the specific package.  Data files found using these
        names/globs will be installed along with the package, in the same
        location as the package.  Note that globs are allowed to reference
        the contents of non-package subdirectories, as long as you use '/' as
        a path separator.  (Globs are automatically converted to
        platform-specific paths at runtime.)
    In addition to these new keywords, this class also has several new methods
    for manipulating the distribution's contents.  For example, the 'include()'
    and 'exclude()' methods can be thought of as in-place add and subtract
    commands that add or remove packages, modules, extensions, and so on from
    the distribution.  They are used by the feature subsystem to configure the
    distribution for the included and excluded features.
    """
    # Set by patch_missing_pkg_info() to the working_set entry whose
    # version was faked up from 'attrs'; None when no patching happened.
    _patched_dist = None
    def patch_missing_pkg_info(self, attrs):
        # Fake up a replacement for the data that would normally come from
        # PKG-INFO, but which might not yet be built if this is a fresh
        # checkout.
        #
        if not attrs or 'name' not in attrs or 'version' not in attrs:
            return
        key = pkg_resources.safe_name(str(attrs['name'])).lower()
        dist = pkg_resources.working_set.by_key.get(key)
        if dist is not None and not dist.has_metadata('PKG-INFO'):
            # Inject the version from setup() so pkg_resources queries work
            # even though PKG-INFO hasn't been generated yet.
            dist._version = pkg_resources.safe_version(str(attrs['version']))
            self._patched_dist = dist
    def __init__ (self, attrs=None):
        # A subclass may have already initialized package_data; don't clobber it.
        have_package_data = hasattr(self, "package_data")
        if not have_package_data:
            self.package_data = {}
        self.require_features = []
        self.features = {}
        self.dist_files = []
        # 'src_root' is popped here so distutils never sees it as an
        # unknown option.
        self.src_root = attrs and attrs.pop("src_root", None)
        self.patch_missing_pkg_info(attrs)
        # Make sure we have any eggs needed to interpret 'attrs'
        if attrs is not None:
            self.dependency_links = attrs.pop('dependency_links', [])
            assert_string_list(self,'dependency_links',self.dependency_links)
        if attrs and 'setup_requires' in attrs:
            # Download/resolve build-time dependencies before interpreting
            # the remaining setup() keywords.
            self.fetch_build_eggs(attrs.pop('setup_requires'))
        # Pre-seed an attribute for every registered setup() keyword so
        # _Distribution.__init__ doesn't warn about "unknown" options.
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            if not hasattr(self,ep.name):
                setattr(self,ep.name,None)
        _Distribution.__init__(self,attrs)
        if isinstance(self.metadata.version, (int,float)):
            # Some people apparently take "version number" too literally :)
            self.metadata.version = str(self.metadata.version)
    def parse_command_line(self):
        """Process features after parsing command line options"""
        result = _Distribution.parse_command_line(self)
        if self.features:
            self._finalize_features()
        return result
    def _feature_attrname(self,name):
        """Convert feature name to corresponding option attribute name"""
        # e.g. feature "foo-bar" is tracked via attribute 'with_foo_bar'.
        return 'with_'+name.replace('-','_')
    def fetch_build_eggs(self, requires):
        """Resolve pre-setup requirements"""
        from pkg_resources import working_set, parse_requirements
        # Resolve (downloading via fetch_build_egg when needed) and make
        # each distribution importable for the rest of the setup run.
        for dist in working_set.resolve(
            parse_requirements(requires), installer=self.fetch_build_egg
        ):
            working_set.add(dist)
    def finalize_options(self):
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()
        # Run each registered setup() keyword's validator against the value
        # the user supplied, fetching the providing egg if necessary.
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self,ep.name,None)
            if value is not None:
                ep.require(installer=self.fetch_build_egg)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
        else:
            self.convert_2to3_doctests = []
    def fetch_build_egg(self, req):
        """Fetch an egg needed for building"""
        try:
            # Reuse the lazily-created easy_install command between calls.
            cmd = self._egg_fetcher
            cmd.package_index.to_scan = []
        except AttributeError:
            from setuptools.command.easy_install import easy_install
            # Build a throwaway Distribution just to read easy_install
            # options out of the config files.
            dist = self.__class__({'script_args':['easy_install']})
            dist.parse_config_files()
            opts = dist.get_option_dict('easy_install')
            keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts'
            )
            for key in list(opts.keys()):
                if key not in keep:
                    del opts[key]   # don't use any other settings
            if self.dependency_links:
                links = self.dependency_links[:]
                if 'find_links' in opts:
                    # Merge config-file find_links ahead of dependency_links.
                    links = opts['find_links'][1].split() + links
                opts['find_links'] = ('setup', links)
            cmd = easy_install(
                dist, args=["x"], install_dir=os.curdir, exclude_scripts=True,
                always_copy=False, build_directory=None, editable=False,
                upgrade=False, multi_version=True, no_report=True, user=False
            )
            cmd.ensure_finalized()
            self._egg_fetcher = cmd
        return cmd.easy_install(req)
    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""
        go = []
        no = self.negative_opt.copy()
        for name,feature in list(self.features.items()):
            # Reset inclusion status to "unknown" until the command line
            # is parsed.
            self._set_feature(name,None)
            feature.validate(self)
            if feature.optional:
                descr = feature.description
                incdef = ' (default)'
                excdef=''
                if not feature.include_by_default():
                    # Swap the "(default)" marker onto the exclude option.
                    excdef, incdef = incdef, excdef
                go.append(('with-'+name, None, 'include '+descr+incdef))
                go.append(('without-'+name, None, 'exclude '+descr+excdef))
                no['without-'+name] = 'with-'+name
        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no
    def _finalize_features(self):
        """Add/remove features and resolve dependencies between them"""
        # First, flag all the enabled items (and thus their dependencies)
        for name,feature in list(self.features.items()):
            enabled = self.feature_is_included(name)
            if enabled or (enabled is None and feature.include_by_default()):
                feature.include_in(self)
                self._set_feature(name,1)
        # Then disable the rest, so that off-by-default features don't
        # get flagged as errors when they're required by an enabled feature
        for name,feature in list(self.features.items()):
            if not self.feature_is_included(name):
                feature.exclude_from(self)
                self._set_feature(name,0)
    def get_command_class(self, command):
        """Pluggable version of get_command_class()"""
        if command in self.cmdclass:
            return self.cmdclass[command]
        # Look the command up among 'distutils.commands' entry points;
        # fall back to the stock distutils lookup if none matches.
        for ep in pkg_resources.iter_entry_points('distutils.commands',command):
            ep.require(installer=self.fetch_build_egg)
            self.cmdclass[command] = cmdclass = ep.load()
            return cmdclass
        else:
            return _Distribution.get_command_class(self, command)
    def print_commands(self):
        # Pre-load all entry-point commands so the help listing is complete.
        for ep in pkg_resources.iter_entry_points('distutils.commands'):
            if ep.name not in self.cmdclass:
                cmdclass = ep.load(False) # don't require extras, we're not running
                self.cmdclass[ep.name] = cmdclass
        return _Distribution.print_commands(self)
    def _set_feature(self,name,status):
        """Set feature's inclusion status"""
        setattr(self,self._feature_attrname(name),status)
    def feature_is_included(self,name):
        """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
        return getattr(self,self._feature_attrname(name))
    def include_feature(self,name):
        """Request inclusion of feature named 'name'"""
        if self.feature_is_included(name)==0:
            # The user explicitly excluded a feature another one requires.
            descr = self.features[name].description
            raise DistutilsOptionError(
                descr + " is required, but was excluded or is not available"
            )
        self.features[name].include_in(self)
        self._set_feature(name,1)
    def include(self,**attrs):
        """Add items to distribution that are named in keyword arguments
        For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
        the distribution's 'py_modules' attribute, if it was not already
        there.
        Currently, this method only supports inclusion for attributes that are
        lists or tuples.  If you need to add support for adding to other
        attributes in this or a subclass, you can add an '_include_X' method,
        where 'X' is the name of the attribute.  The method will be called with
        the value passed to 'include()'.  So, 'dist.include(foo={"bar":"baz"})'
        will try to call 'dist._include_foo({"bar":"baz"})', which can then
        handle whatever special inclusion logic is needed.
        """
        for k,v in list(attrs.items()):
            # Dispatch to a per-attribute hook when one exists.
            include = getattr(self, '_include_'+k, None)
            if include:
                include(v)
            else:
                self._include_misc(k,v)
    def exclude_package(self,package):
        """Remove packages, modules, and extensions in named package"""
        pfx = package+'.'
        if self.packages:
            self.packages = [
                p for p in self.packages
                if p != package and not p.startswith(pfx)
            ]
        if self.py_modules:
            self.py_modules = [
                p for p in self.py_modules
                if p != package and not p.startswith(pfx)
            ]
        if self.ext_modules:
            self.ext_modules = [
                p for p in self.ext_modules
                if p.name != package and not p.name.startswith(pfx)
            ]
    def has_contents_for(self,package):
        """Return true if 'exclude_package(package)' would do something"""
        # Implicitly returns None (falsy) when nothing matches.
        pfx = package+'.'
        for p in self.iter_distribution_names():
            if p==package or p.startswith(pfx):
                return True
    def _exclude_misc(self,name,value):
        """Handle 'exclude()' for list/tuple attrs without a special handler"""
        if not isinstance(value,sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list or tuple (%r)" % (name, value)
            )
        try:
            old = getattr(self,name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is not None and not isinstance(old,sequence):
            raise DistutilsSetupError(
                name+": this setting cannot be changed via include/exclude"
            )
        elif old:
            # Keep only items not being excluded; a None/empty old value
            # needs no change.
            setattr(self,name,[item for item in old if item not in value])
    def _include_misc(self,name,value):
        """Handle 'include()' for list/tuple attrs without a special handler"""
        if not isinstance(value,sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list (%r)" % (name, value)
            )
        try:
            old = getattr(self,name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is None:
            setattr(self,name,value)
        elif not isinstance(old,sequence):
            raise DistutilsSetupError(
                name+": this setting cannot be changed via include/exclude"
            )
        else:
            # Append only the genuinely new items, preserving order.
            setattr(self,name,old+[item for item in value if item not in old])
    def exclude(self,**attrs):
        """Remove items from distribution that are named in keyword arguments
        For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
        the distribution's 'py_modules' attribute.  Excluding packages uses
        the 'exclude_package()' method, so all of the package's contained
        packages, modules, and extensions are also excluded.
        Currently, this method only supports exclusion from attributes that are
        lists or tuples.  If you need to add support for excluding from other
        attributes in this or a subclass, you can add an '_exclude_X' method,
        where 'X' is the name of the attribute.  The method will be called with
        the value passed to 'exclude()'.  So, 'dist.exclude(foo={"bar":"baz"})'
        will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
        handle whatever special exclusion logic is needed.
        """
        for k,v in list(attrs.items()):
            # Dispatch to a per-attribute hook when one exists.
            exclude = getattr(self, '_exclude_'+k, None)
            if exclude:
                exclude(v)
            else:
                self._exclude_misc(k,v)
    def _exclude_packages(self,packages):
        if not isinstance(packages,sequence):
            raise DistutilsSetupError(
                "packages: setting must be a list or tuple (%r)" % (packages,)
            )
        list(map(self.exclude_package, packages))
    def _parse_command_opts(self, parser, args):
        # Remove --with-X/--without-X options when processing command args
        self.global_options = self.__class__.global_options
        self.negative_opt = self.__class__.negative_opt
        # First, expand any aliases
        command = args[0]
        aliases = self.get_option_dict('aliases')
        while command in aliases:
            src,alias = aliases[command]
            del aliases[command]    # ensure each alias can expand only once!
            import shlex
            args[:1] = shlex.split(alias,True)
            command = args[0]
        nargs = _Distribution._parse_command_opts(self, parser, args)
        # Handle commands that want to consume all remaining arguments
        cmd_class = self.get_command_class(command)
        if getattr(cmd_class,'command_consumes_arguments',None):
            self.get_option_dict(command)['args'] = ("command line", nargs)
            if nargs is not None:
                return []
        return nargs
    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options
        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores.  If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.
        Note that options provided by config files are intentionally excluded.
        """
        d = {}
        for cmd,opts in list(self.command_options.items()):
            for opt,(src,val) in list(opts.items()):
                if src != "command line":
                    continue
                opt = opt.replace('_','-')
                if val==0:
                    # A 0 value means a negative option was used; map it
                    # back to its '--no-xxx'-style spelling.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj,'negative_opt',{}))
                    for neg,pos in list(neg_opt.items()):
                        if pos==opt:
                            opt=neg
                            val=None
                            break
                    else:
                        raise AssertionError("Shouldn't be able to get here")
                elif val==1:
                    val = None
                d.setdefault(cmd,{})[opt] = val
        return d
    def iter_distribution_names(self):
        """Yield all packages, modules, and extension names in distribution"""
        for pkg in self.packages or ():
            yield pkg
        for module in self.py_modules or ():
            yield module
        for ext in self.ext_modules or ():
            if isinstance(ext,tuple):
                # Old-style (name, build_info) extension tuples.
                name, buildinfo = ext
            else:
                name = ext.name
            if name.endswith('module'):
                name = name[:-6]
            yield name
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.
        """
        import sys
        if sys.version_info < (3,) or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)
        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)
        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)
        # Print metadata in UTF-8 no matter the platform
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Restore stdout with its original encoding afterwards.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
# Replace the Distribution class in every distutils module that exposes
# one, so code importing it from any of them gets the enhanced version.
for module in distutils.dist, distutils.core, distutils.cmd:
    module.Distribution = Distribution
class Feature:
    """A subset of the distribution that can be excluded if unneeded/wanted
    Features are created using these keyword arguments:
    'description' -- a short, human readable description of the feature, to
        be used in error messages, and option help messages.
    'standard' -- if true, the feature is included by default if it is
        available on the current system.  Otherwise, the feature is only
        included if requested via a command line '--with-X' option, or if
        another included feature requires it.  The default setting is 'False'.
    'available' -- if true, the feature is available for installation on the
        current system.  The default setting is 'True'.
    'optional' -- if true, the feature's inclusion can be controlled from the
        command line, using the '--with-X' or '--without-X' options.  If
        false, the feature's inclusion status is determined automatically,
        based on 'available', 'standard', and whether any other feature
        requires it.  The default setting is 'True'.
    'require_features' -- a string or sequence of strings naming features
        that should also be included if this feature is included.  Defaults to
        empty list.  May also contain 'Require' objects that should be
        added/removed from the distribution.
    'remove' -- a string or list of strings naming packages to be removed
        from the distribution if this feature is *not* included.  If the
        feature *is* included, this argument is ignored.  This argument exists
        to support removing features that "crosscut" a distribution, such as
        defining a 'tests' feature that removes all the 'tests' subpackages
        provided by other features.  The default for this argument is an empty
        list.  (Note: the named package(s) or modules must exist in the base
        distribution when the 'setup()' function is initially called.)
    other keywords -- any other keyword arguments are saved, and passed to
        the distribution's 'include()' and 'exclude()' methods when the
        feature is included or excluded, respectively.  So, for example, you
        could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
        added or removed from the distribution as appropriate.
    A feature must include at least one 'requires', 'remove', or other
    keyword argument.  Otherwise, it can't affect the distribution in any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded.  See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """
    def __init__(self, description, standard=False, available=True,
        optional=True, require_features=(), remove=(), **extras
    ):
        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # Normalize a lone string/Require into a one-element tuple.
        if isinstance(require_features, (str, Require)):
            require_features = require_features,
        # Plain strings name other features to include alongside this one.
        self.require_features = [
            r for r in require_features if isinstance(r, str)
        ]
        # Require objects are handed to the distribution via 'extras'.
        er = [r for r in require_features if not isinstance(r, str)]
        if er:
            extras['require_features'] = er
        if isinstance(remove, str):
            remove = remove,
        self.remove = remove
        self.extras = extras
        if not remove and not require_features and not extras:
            # BUGFIX: the original message contained a bare '%s' that was
            # never interpolated; include the feature's description.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or"
                " at least one of 'packages', 'py_modules', etc."
                % (description,)
            )
    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard
    def include_in(self, dist):
        """Ensure feature and its requirements are included in distribution
        You may override this in a subclass to perform additional operations on
        the distribution.  Note that this method may be called more than once
        per feature, and so should be idempotent.
        """
        if not self.available:
            # BUGFIX: the concatenated message used to read
            # "...is required,but is not available..." (missing space).
            raise DistutilsPlatformError(
                self.description + " is required, "
                "but is not available on this platform"
            )
        dist.include(**self.extras)
        for f in self.require_features:
            dist.include_feature(f)
    def exclude_from(self, dist):
        """Ensure feature is excluded from distribution
        You may override this in a subclass to perform additional operations on
        the distribution.  This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)
        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)
    def validate(self, dist):
        """Verify that feature makes sense in context of distribution
        This method is called by the distribution just before it parses its
        command line.  It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called.  You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
def check_packages(dist, attr, value):
    """Warn about any entry that is not a dot-separated identifier chain."""
    for pkgname in value:
        if re.match(r'\w+(\.\w+)*', pkgname) is None:
            distutils.log.warn(
                "WARNING: %r not a valid package name; please use only"
                ".-separated package names in setup.py", pkgname
            )
| {
"repo_name": "Drvanon/Game",
"path": "venv/lib/python3.3/site-packages/distribute-0.6.34-py3.3.egg/setuptools/dist.py",
"copies": "1",
"size": "31852",
"license": "apache-2.0",
"hash": 4082603245588716000,
"line_mean": 36.2538011696,
"line_max": 97,
"alpha_frac": 0.6132111013,
"autogenerated": false,
"ratio": 4.537968371562901,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5651179472862901,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Distribution']
import re
import os
import sys
import warnings
import distutils.log
import distutils.core
import distutils.cmd
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.depends import Require
from setuptools.compat import numeric_types, basestring
import pkg_resources
def _get_unpatched(cls):
    """Protect against re-patching the distutils if reloaded
    Also ensures that no other distutils extension monkeypatched the distutils
    first.
    """
    candidate = cls
    # Walk down through any setuptools-defined subclasses; each level is
    # expected to have exactly one base.
    while candidate.__module__.startswith('setuptools'):
        candidate, = candidate.__bases__
    if not candidate.__module__.startswith('distutils'):
        # Some other extension got there first -- refuse to patch on top.
        raise AssertionError(
            "distutils has already been patched by %r" % candidate
        )
    return candidate
# Recover the pristine distutils Distribution class (undoing any prior
# patching) so the subclass below extends the real thing.
_Distribution = _get_unpatched(_Distribution)
# Types accepted wherever a "list or tuple" setting is expected.
sequence = tuple, list
def check_importable(dist, attr, value):
    """Verify that value parses as a 'module:attrs' entry-point string."""
    try:
        # Parsing as a dummy entry point validates the syntax; extras are
        # not allowed here.
        parsed = pkg_resources.EntryPoint.parse('x='+value)
        assert not parsed.extras
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr,value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    if value is None:
        # BUGFIX: the docstring promises None is acceptable (meaning "not
        # set"), but ''.join(None) used to raise and turn it into an error.
        return
    try:
        # A non-iterable (or iterable of non-strings) fails the join; a
        # plain string joins to itself, which we also reject.
        joined = ''.join(value)
    except (TypeError, ValueError, AttributeError):
        joined = value  # force the error path below
    if joined == value:
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    assert_string_list(dist, attr, value)
    for nsp in value:
        # Every namespace package must actually ship something.
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        if '.' not in nsp:
            continue
        # A dotted namespace package's parent should be declared too.
        parent = '.'.join(nsp.split('.')[:-1])
        if parent in value:
            continue
        distutils.log.warn(
            "WARNING: %r is declared as a package namespace, but %r"
            " is not: please correct this in setup.py", nsp, parent
        )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for extra_name, reqs in value.items():
            if ':' in extra_name:
                # "name:marker" keys carry an environment marker that must
                # itself be valid.
                extra_name, marker = extra_name.split(':', 1)
                if pkg_resources.invalid_marker(marker):
                    raise DistutilsSetupError("Invalid environment marker: "+marker)
            list(pkg_resources.parse_requirements(reqs))
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    # bool(value) == value holds exactly for True/False/0/1.
    if bool(value) == value:
        return
    raise DistutilsSetupError(
        "%r must be a boolean value (got %r)" % (attr,value)
    )
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        # parse_requirements is lazy; force full evaluation to surface
        # any bad specifier.
        parsed = pkg_resources.parse_requirements(value)
        list(parsed)
    except (TypeError, ValueError):
        raise DistutilsSetupError(
            "%r must be a string or list of strings "
            "containing valid project/version requirement specifiers" % (attr,)
        )
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as e:
        # Re-raise as a setup error so distutils reports it nicely.
        raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
    """Verify that test_suite is a (dotted-name) string."""
    if isinstance(value, basestring):
        return
    raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    if isinstance(value, dict):
        # Accept only str keys mapping to iterables of glob patterns.
        well_formed = True
        for pkg_name, patterns in value.items():
            if not isinstance(pkg_name, str):
                well_formed = False
                break
            try:
                iter(patterns)
            except TypeError:
                well_formed = False
                break
        if well_formed:
            return
    raise DistutilsSetupError(
        attr+" must be a dictionary mapping package names to lists of "
        "wildcard patterns"
    )
def check_packages(dist, attr, value):
    """Warn for package names that aren't dot-separated identifier chains."""
    for pkgname in value:
        match = re.match(r'\w+(\.\w+)*', pkgname)
        if match:
            continue
        distutils.log.warn(
            "WARNING: %r not a valid package name; please use only"
            ".-separated package names in setup.py", pkgname
        )
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
_attrs_dict = attrs or {}
if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
Feature.warn_deprecated()
self.require_features = []
self.features = {}
self.dist_files = []
self.src_root = attrs and attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
# Make sure we have any eggs needed to interpret 'attrs'
if attrs is not None:
self.dependency_links = attrs.pop('dependency_links', [])
assert_string_list(self,'dependency_links',self.dependency_links)
if attrs and 'setup_requires' in attrs:
self.fetch_build_eggs(attrs.pop('setup_requires'))
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
if not hasattr(self,ep.name):
setattr(self,ep.name,None)
_Distribution.__init__(self,attrs)
if isinstance(self.metadata.version, numeric_types):
# Some people apparently take "version number" too literally :)
self.metadata.version = str(self.metadata.version)
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self,name):
"""Convert feature name to corresponding option attribute name"""
return 'with_'+name.replace('-','_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
from pkg_resources import working_set, parse_requirements
for dist in working_set.resolve(
parse_requirements(requires), installer=self.fetch_build_egg
):
working_set.add(dist)
def finalize_options(self):
_Distribution.finalize_options(self)
if self.features:
self._set_global_opts_from_features()
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self,ep.name,None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
else:
self.convert_2to3_doctests = []
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
try:
cmd = self._egg_fetcher
cmd.package_index.to_scan = []
except AttributeError:
from setuptools.command.easy_install import easy_install
dist = self.__class__({'script_args':['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts'
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
cmd = easy_install(
dist, args=["x"], install_dir=os.curdir, exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
self._egg_fetcher = cmd
return cmd.easy_install(req)
    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""
        go = []   # accumulated (long-option, short-option, help-text) triples
        no = self.negative_opt.copy()
        for name,feature in self.features.items():
            self._set_feature(name,None)   # status unknown until command line parsed
            feature.validate(self)
            if feature.optional:
                descr = feature.description
                # Tag whichever of include/exclude is the default in its help.
                incdef = ' (default)'
                excdef=''
                if not feature.include_by_default():
                    excdef, incdef = incdef, excdef
                go.append(('with-'+name, None, 'include '+descr+incdef))
                go.append(('without-'+name, None, 'exclude '+descr+excdef))
                # '--without-X' is registered as the negation of '--with-X'.
                no['without-'+name] = 'with-'+name
        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name,feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name,1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name,feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name,0)
    def get_command_class(self, command):
        """Pluggable version of get_command_class()"""
        if command in self.cmdclass:
            return self.cmdclass[command]
        # A matching 'distutils.commands' entry point takes precedence; the
        # loop body returns on the first (and only) match, so the 'else'
        # clause runs only when no plugin provides the command.
        for ep in pkg_resources.iter_entry_points('distutils.commands',command):
            ep.require(installer=self.fetch_build_egg)
            self.cmdclass[command] = cmdclass = ep.load()
            return cmdclass
        else:
            # No plugin: fall back to distutils' built-in lookup.
            return _Distribution.get_command_class(self, command)
    def print_commands(self):
        """Print command help, pre-registering entry-point-provided commands."""
        for ep in pkg_resources.iter_entry_points('distutils.commands'):
            if ep.name not in self.cmdclass:
                cmdclass = ep.load(False) # don't require extras, we're not running
                self.cmdclass[ep.name] = cmdclass
        return _Distribution.print_commands(self)
def _set_feature(self,name,status):
"""Set feature's inclusion status"""
setattr(self,self._feature_attrname(name),status)
def feature_is_included(self,name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self,self._feature_attrname(name))
def include_feature(self,name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name)==0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name,1)
def include(self,**attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k,v in attrs.items():
include = getattr(self, '_include_'+k, None)
if include:
include(v)
else:
self._include_misc(k,v)
def exclude_package(self,package):
"""Remove packages, modules, and extensions in named package"""
pfx = package+'.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self,package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package+'.'
for p in self.iter_distribution_names():
if p==package or p.startswith(pfx):
return True
def _exclude_misc(self,name,value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self,name,[item for item in old if item not in value])
def _include_misc(self,name,value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self,name,value)
elif not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
else:
setattr(self,name,old+[item for item in value if item not in old])
def exclude(self,**attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k,v in attrs.items():
exclude = getattr(self, '_exclude_'+k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k,v)
def _exclude_packages(self,packages):
if not isinstance(packages,sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src,alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias,True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class,'command_consumes_arguments',None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options
        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores.  If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.
        Note that options provided by config files are intentionally excluded.
        """
        d = {}
        for cmd,opts in self.command_options.items():
            for opt,(src,val) in opts.items():
                if src != "command line":
                    continue
                opt = opt.replace('_','-')
                if val==0:
                    # A zero value means a negative option was given; report
                    # it under its negative name with no argument.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj,'negative_opt',{}))
                    for neg,pos in neg_opt.items():
                        if pos==opt:
                            opt=neg
                            val=None
                            break
                    else:
                        # Every zero-valued option must map to some negative
                        # form; anything else indicates corrupted state.
                        raise AssertionError("Shouldn't be able to get here")
                elif val==1:
                    # Flag options (no argument) are reported as None.
                    val = None
                d.setdefault(cmd,{})[opt] = val
        return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext,tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.

        On Python 3, stdout is temporarily re-wrapped as UTF-8 so metadata
        containing non-ASCII text prints regardless of the platform encoding.
        """
        import sys
        if sys.version_info < (3,) or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)
        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)
        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)
        # Print metadata in UTF-8 no matter the platform
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Always restore the original stdout wrapper, even on error.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
# Monkeypatch every distutils module that exposes Distribution so that
# plain 'distutils.core.setup()' calls also get the setuptools-enhanced class.
for module in distutils.dist, distutils.core, distutils.cmd:
    module.Distribution = Distribution
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://bitbucket.org/pypa/setuptools/issue/58>`_ and will be removed in
    a future version.

    A subset of the distribution that can be excluded if unneeded/wanted

    Features are created using these keyword arguments:

      'description' -- a short, human readable description of the feature, to
         be used in error messages, and option help messages.

      'standard' -- if true, the feature is included by default if it is
         available on the current system.  Otherwise, the feature is only
         included if requested via a command line '--with-X' option, or if
         another included feature requires it.  The default setting is 'False'.

      'available' -- if true, the feature is available for installation on the
         current system.  The default setting is 'True'.

      'optional' -- if true, the feature's inclusion can be controlled from the
         command line, using the '--with-X' or '--without-X' options.  If
         false, the feature's inclusion status is determined automatically,
         based on 'available', 'standard', and whether any other feature
         requires it.  The default setting is 'True'.

      'require_features' -- a string or sequence of strings naming features
         that should also be included if this feature is included.  Defaults to
         empty list.  May also contain 'Require' objects that should be
         added/removed from the distribution.

      'remove' -- a string or list of strings naming packages to be removed
         from the distribution if this feature is *not* included.  If the
         feature *is* included, this argument is ignored.  This argument exists
         to support removing features that "crosscut" a distribution, such as
         defining a 'tests' feature that removes all the 'tests' subpackages
         provided by other features.  The default for this argument is an empty
         list.  (Note: the named package(s) or modules must exist in the base
         distribution when the 'setup()' function is initially called.)

      other keywords -- any other keyword arguments are saved, and passed to
         the distribution's 'include()' and 'exclude()' methods when the
         feature is included or excluded, respectively.  So, for example, you
         could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
         added or removed from the distribution as appropriate.

    A feature must include at least one 'requires', 'remove', or other
    keyword argument.  Otherwise, it can't affect the distribution in any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded.  See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """

    @staticmethod
    def warn_deprecated():
        warnings.warn(
            "Features are deprecated and will be removed in a future "
            "version. See http://bitbucket.org/pypa/setuptools/65.",
            DeprecationWarning,
            stacklevel=3,
        )

    def __init__(self, description, standard=False, available=True,
            optional=True, require_features=(), remove=(), **extras):
        self.warn_deprecated()
        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # Normalize a lone string/Require into a 1-tuple.
        if isinstance(require_features,(str,Require)):
            require_features = require_features,
        # Plain strings name other features; Require objects are forwarded
        # to the distribution via the 'require_features' extra.
        self.require_features = [
            r for r in require_features if isinstance(r,str)
        ]
        er = [r for r in require_features if not isinstance(r,str)]
        if er: extras['require_features'] = er
        if isinstance(remove,str):
            remove = remove,
        self.remove = remove
        self.extras = extras
        if not remove and not require_features and not extras:
            # BUG FIX: the original message contained a '%s' placeholder that
            # was never interpolated; supply the feature description.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or at least one"
                " of 'packages', 'py_modules', etc." % (description,)
            )

    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard

    def include_in(self,dist):
        """Ensure feature and its requirements are included in distribution

        You may override this in a subclass to perform additional operations on
        the distribution.  Note that this method may be called more than once
        per feature, and so should be idempotent.
        """
        if not self.available:
            # BUG FIX: the two message fragments previously joined as
            # "required,but" with no space between them.
            raise DistutilsPlatformError(
                self.description + " is required, "
                "but is not available on this platform"
            )
        dist.include(**self.extras)
        for f in self.require_features:
            dist.include_feature(f)

    def exclude_from(self,dist):
        """Ensure feature is excluded from distribution

        You may override this in a subclass to perform additional operations on
        the distribution.  This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)
        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)

    def validate(self,dist):
        """Verify that feature makes sense in context of distribution

        This method is called by the distribution just before it parses its
        command line.  It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called.  You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
| {
"repo_name": "KhalidGit/flask",
"path": "Work/Trivia - Module 5/env/Lib/site-packages/setuptools/dist.py",
"copies": "72",
"size": "32547",
"license": "apache-2.0",
"hash": -5675795066891701000,
"line_mean": 39.8881909548,
"line_max": 97,
"alpha_frac": 0.612928995,
"autogenerated": false,
"ratio": 4.5367995539448005,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036364027374292396,
"num_lines": 796
} |
__all__ = ['Distribution']
import re
import os
import sys
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.depends import Require
from setuptools.compat import basestring, PY2
from setuptools import windows_support
import pkg_resources
packaging = pkg_resources.packaging
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
_Distribution = _get_unpatched(_Distribution)
def _patch_distribution_metadata_write_pkg_info():
    """
    Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
    encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
    correct this undesirable behavior.
    """
    # Only the affected interpreter range (3.0 <= version < 3.2.2) is patched.
    environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
    if not environment_local:
        return
    # from Python 3.4
    def write_pkg_info(self, base_dir):
        """Write the PKG-INFO file into the release tree.
        """
        # Force UTF-8 regardless of the platform's default encoding.
        with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
                  encoding='UTF-8') as pkg_info:
            self.write_pkg_file(pkg_info)
    distutils.dist.DistributionMetadata.write_pkg_info = write_pkg_info
# Apply the patch at import time so all metadata writes are covered.
_patch_distribution_metadata_write_pkg_info()
sequence = tuple, list
def check_importable(dist, attr, value):
    """Verify that *value* parses as an importable 'module:attrs' string."""
    ok = True
    try:
        ep = pkg_resources.EntryPoint.parse('x=' + value)
        # Extras are not allowed in a bare importable reference.
        ok = not ep.extras
    except (TypeError, ValueError, AttributeError):
        ok = False
    if not ok:
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr, value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    try:
        joined = ''.join(value)
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
    # A plain string joins to itself, whereas a genuine list/tuple of
    # strings never compares equal to its own concatenation.
    if joined == value:
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    assert_string_list(dist, attr, value)
    for nsp in value:
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        if '.' not in nsp:
            continue
        # Every dotted namespace must also declare its parent namespace.
        parent = nsp.rpartition('.')[0]
        if parent not in value:
            distutils.log.warn(
                "WARNING: %r is declared as a package namespace, but %r"
                " is not: please correct this in setup.py", nsp, parent
            )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for key, reqs in value.items():
            # Keys may carry an environment marker after a colon.
            name, sep, marker = key.partition(':')
            if sep and pkg_resources.invalid_marker(marker):
                raise DistutilsSetupError("Invalid environment marker: " + marker)
            list(pkg_resources.parse_requirements(reqs))
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    # bool(x) == x holds exactly for True/False/0/1.
    if bool(value) == value:
        return
    tmpl = "{attr!r} must be a boolean value (got {value!r})"
    raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        # parse_requirements is lazy; force full parsing with list().
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError) as error:
        message = (
            "{attr!r} must be a string or list of strings "
            "containing valid project/version requirement specifiers; {error}"
        )
        raise DistutilsSetupError(message.format(attr=attr, error=error))
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as exc:
        # Re-raise as a setup error so distutils reports it uniformly.
        raise DistutilsSetupError(exc)
def check_test_suite(dist, attr, value):
    """Verify that the test_suite setting is a (dotted-path) string."""
    if isinstance(value, basestring):
        return
    raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    if isinstance(value, dict):
        valid = True
        for pkg_name, patterns in value.items():
            if not isinstance(pkg_name, str):
                valid = False
                break
            # Patterns need only be iterable (any iterable of globs is fine).
            try:
                iter(patterns)
            except TypeError:
                valid = False
                break
        if valid:
            return
    raise DistutilsSetupError(
        attr+" must be a dictionary mapping package names to lists of "
        "wildcard patterns"
    )
def check_packages(dist, attr, value):
    """Warn (without failing) about package names that are not dotted identifiers."""
    pattern = re.compile(r'\w+(\.\w+)*')
    for pkgname in value:
        if pattern.match(pkgname):
            continue
        distutils.log.warn(
            "WARNING: %r not a valid package name; please use only"
            ".-separated package names in setup.py", pkgname
        )
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
    def __init__(self, attrs=None):
        """Initialize the distribution, fetching setup_requires eggs first.

        The order matters: dependency links and setup_requires must be
        processed before _Distribution.__init__ runs the setup-keyword
        plugins, which may live in the just-fetched eggs.
        """
        have_package_data = hasattr(self, "package_data")
        if not have_package_data:
            self.package_data = {}
        _attrs_dict = attrs or {}
        if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
            Feature.warn_deprecated()
        self.require_features = []
        self.features = {}
        self.dist_files = []
        # src_root is popped so distutils doesn't see an unknown option.
        self.src_root = attrs and attrs.pop("src_root", None)
        self.patch_missing_pkg_info(attrs)
        # Make sure we have any eggs needed to interpret 'attrs'
        if attrs is not None:
            self.dependency_links = attrs.pop('dependency_links', [])
            assert_string_list(self,'dependency_links',self.dependency_links)
        if attrs and 'setup_requires' in attrs:
            self.fetch_build_eggs(attrs['setup_requires'])
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            # Pre-seed every plugin keyword so distutils doesn't reject it.
            if not hasattr(self,ep.name):
                setattr(self,ep.name,None)
        _Distribution.__init__(self,attrs)
        if isinstance(self.metadata.version, numbers.Number):
            # Some people apparently take "version number" too literally :)
            self.metadata.version = str(self.metadata.version)
        if self.metadata.version is not None:
            try:
                ver = packaging.version.Version(self.metadata.version)
                normalized_version = str(ver)
                if self.metadata.version != normalized_version:
                    warnings.warn(
                        "Normalizing '%s' to '%s'" % (
                            self.metadata.version,
                            normalized_version,
                        )
                    )
                    self.metadata.version = normalized_version
            except (packaging.version.InvalidVersion, TypeError):
                warnings.warn(
                    "The version specified (%r) is an invalid version, this "
                    "may not work as expected with newer versions of "
                    "setuptools, pip, and PyPI. Please see PEP 440 for more "
                    "details." % self.metadata.version
                )
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self,name):
"""Convert feature name to corresponding option attribute name"""
return 'with_'+name.replace('-','_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
    def finalize_options(self):
        """Finalize options, then run feature and setup-keyword plugin hooks.

        After the base class finalizes, feature-driven --with-X/--without-X
        options are registered, and every 'distutils.setup_keywords' entry
        point whose keyword was supplied gets to validate its value.
        """
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self,ep.name,None)
            if value is not None:
                # Make the plugin's own requirements importable before
                # invoking its validation callback.
                ep.require(installer=self.fetch_build_egg)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
        else:
            self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
    def fetch_build_egg(self, req):
        """Fetch an egg needed for building"""
        try:
            # Reuse a lazily created easy_install command object across calls;
            # reset its scan list so stale state doesn't leak between fetches.
            cmd = self._egg_fetcher
            cmd.package_index.to_scan = []
        except AttributeError:
            from setuptools.command.easy_install import easy_install
            # Build a throwaway Distribution so the user's setup.cfg is
            # honored without contaminating this distribution's options.
            dist = self.__class__({'script_args':['easy_install']})
            dist.parse_config_files()
            opts = dist.get_option_dict('easy_install')
            # Only these easy_install settings are safe to inherit here.
            keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts'
            )
            for key in list(opts):
                if key not in keep:
                    del opts[key]   # don't use any other settings
            if self.dependency_links:
                links = self.dependency_links[:]
                if 'find_links' in opts:
                    # Prepend any user-configured find_links to ours.
                    links = opts['find_links'][1].split() + links
                opts['find_links'] = ('setup', links)
            # Eggs land in the hidden .eggs cache, not the current directory.
            install_dir = self.get_egg_cache_dir()
            cmd = easy_install(
                dist, args=["x"], install_dir=install_dir, exclude_scripts=True,
                always_copy=False, build_directory=None, editable=False,
                upgrade=False, multi_version=True, no_report=True, user=False
            )
            cmd.ensure_finalized()
            self._egg_fetcher = cmd
        return cmd.easy_install(req)
    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""
        go = []   # accumulated (long-option, short-option, help-text) triples
        no = self.negative_opt.copy()
        for name,feature in self.features.items():
            self._set_feature(name,None)   # status unknown until command line parsed
            feature.validate(self)
            if feature.optional:
                descr = feature.description
                # Tag whichever of include/exclude is the default in its help.
                incdef = ' (default)'
                excdef=''
                if not feature.include_by_default():
                    excdef, incdef = incdef, excdef
                go.append(('with-'+name, None, 'include '+descr+incdef))
                go.append(('without-'+name, None, 'exclude '+descr+excdef))
                # '--without-X' is registered as the negation of '--with-X'.
                no['without-'+name] = 'with-'+name
        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name,feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name,1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name,feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name,0)
    def get_command_class(self, command):
        """Pluggable version of get_command_class()"""
        if command in self.cmdclass:
            return self.cmdclass[command]
        # A matching 'distutils.commands' entry point takes precedence; the
        # loop body returns on the first (and only) match, so the 'else'
        # clause runs only when no plugin provides the command.
        for ep in pkg_resources.iter_entry_points('distutils.commands',command):
            ep.require(installer=self.fetch_build_egg)
            self.cmdclass[command] = cmdclass = ep.load()
            return cmdclass
        else:
            # No plugin: fall back to distutils' built-in lookup.
            return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def _set_feature(self,name,status):
"""Set feature's inclusion status"""
setattr(self,self._feature_attrname(name),status)
def feature_is_included(self,name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self,self._feature_attrname(name))
def include_feature(self,name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name)==0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name,1)
def include(self,**attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k,v in attrs.items():
include = getattr(self, '_include_'+k, None)
if include:
include(v)
else:
self._include_misc(k,v)
def exclude_package(self,package):
"""Remove packages, modules, and extensions in named package"""
pfx = package+'.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self,package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package+'.'
for p in self.iter_distribution_names():
if p==package or p.startswith(pfx):
return True
def _exclude_misc(self,name,value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self,name,[item for item in old if item not in value])
def _include_misc(self,name,value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self,name,value)
elif not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
else:
setattr(self,name,old+[item for item in value if item not in old])
def exclude(self,**attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k,v in attrs.items():
exclude = getattr(self, '_exclude_'+k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k,v)
def _exclude_packages(self,packages):
if not isinstance(packages,sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
    def _parse_command_opts(self, parser, args):
        """Parse one command's options, expanding aliases first.

        Also arranges for commands that declare
        'command_consumes_arguments' to receive the remaining argv as an
        option instead of having it parsed.
        """
        # Remove --with-X/--without-X options when processing command args:
        # reset to the class-level defaults so feature options added by
        # _set_global_opts_from_features don't leak into command parsing.
        self.global_options = self.__class__.global_options
        self.negative_opt = self.__class__.negative_opt
        # First, expand any aliases
        command = args[0]
        aliases = self.get_option_dict('aliases')
        while command in aliases:
            src,alias = aliases[command]
            del aliases[command] # ensure each alias can expand only once!
            import shlex
            args[:1] = shlex.split(alias,True)
            command = args[0]
        nargs = _Distribution._parse_command_opts(self, parser, args)
        # Handle commands that want to consume all remaining arguments
        cmd_class = self.get_command_class(command)
        if getattr(cmd_class,'command_consumes_arguments',None):
            # Stash the leftover args as if they came from the command line,
            # and report no remaining args to the caller.
            self.get_option_dict(command)['args'] = ("command line", nargs)
            if nargs is not None:
                return []
        return nargs
    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options

        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores. If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.
        Note that options provided by config files are intentionally excluded.
        """
        d = {}
        for cmd,opts in self.command_options.items():
            for opt,(src,val) in opts.items():
                # Skip anything that did not come from the command line.
                if src != "command line":
                    continue
                opt = opt.replace('_','-')
                if val==0:
                    # val == 0 means a negative option was given; report it
                    # under its negative name with no value.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj,'negative_opt',{}))
                    for neg,pos in neg_opt.items():
                        if pos==opt:
                            opt=neg
                            val=None
                            break
                    else:
                        # A 0 value can only come from a negative option.
                        raise AssertionError("Shouldn't be able to get here")
                elif val==1:
                    # Flag options store 1; report them as valueless.
                    val = None
                d.setdefault(cmd,{})[opt] = val
        return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext,tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.
        """
        import sys
        # NOTE(review): PY2 is presumably imported from six at the top of
        # this file (not visible here) — confirm.
        if PY2 or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)
        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)
        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)
        # Print metadata in UTF-8 no matter the platform
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering
        # detach() hands over the underlying binary buffer; the old wrapper
        # must not be used afterwards, hence the full swap-and-restore.
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Restore a wrapper with the original encoding settings.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
# (each of these modules holds its own reference to Distribution, so all
# three must be patched for the enhanced class to be picked up everywhere)
for module in distutils.dist, distutils.core, distutils.cmd:
    module.Distribution = Distribution
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://bitbucket.org/pypa/setuptools/issue/58>`_ and will be removed in
    a future version.

    A subset of the distribution that can be excluded if unneeded/wanted

    Features are created using these keyword arguments:

    'description' -- a short, human readable description of the feature, to
    be used in error messages, and option help messages.

    'standard' -- if true, the feature is included by default if it is
    available on the current system. Otherwise, the feature is only
    included if requested via a command line '--with-X' option, or if
    another included feature requires it. The default setting is 'False'.

    'available' -- if true, the feature is available for installation on the
    current system. The default setting is 'True'.

    'optional' -- if true, the feature's inclusion can be controlled from the
    command line, using the '--with-X' or '--without-X' options. If
    false, the feature's inclusion status is determined automatically,
    based on 'available', 'standard', and whether any other feature
    requires it. The default setting is 'True'.

    'require_features' -- a string or sequence of strings naming features
    that should also be included if this feature is included. Defaults to
    empty list. May also contain 'Require' objects that should be
    added/removed from the distribution.

    'remove' -- a string or list of strings naming packages to be removed
    from the distribution if this feature is *not* included. If the
    feature *is* included, this argument is ignored. This argument exists
    to support removing features that "crosscut" a distribution, such as
    defining a 'tests' feature that removes all the 'tests' subpackages
    provided by other features. The default for this argument is an empty
    list. (Note: the named package(s) or modules must exist in the base
    distribution when the 'setup()' function is initially called.)

    other keywords -- any other keyword arguments are saved, and passed to
    the distribution's 'include()' and 'exclude()' methods when the
    feature is included or excluded, respectively.

    A feature must include at least one 'requires', 'remove', or other
    keyword argument; otherwise it can't affect the distribution in any way.
    You can subclass 'Feature' to create specialized feature types that
    modify the distribution in other ways when included or excluded.  Aside
    from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """

    @staticmethod
    def warn_deprecated():
        warnings.warn(
            "Features are deprecated and will be removed in a future "
            "version. See http://bitbucket.org/pypa/setuptools/65.",
            DeprecationWarning,
            stacklevel=3,
        )

    def __init__(self, description, standard=False, available=True,
                 optional=True, require_features=(), remove=(), **extras):
        self.warn_deprecated()

        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # Allow a single name or Require object in place of a sequence.
        if isinstance(require_features, (str, Require)):
            require_features = require_features,

        # Plain strings name other features; Require objects are passed to
        # include()/exclude() via the extras mapping instead.
        self.require_features = [
            r for r in require_features if isinstance(r, str)
        ]
        er = [r for r in require_features if not isinstance(r, str)]
        if er:
            extras['require_features'] = er

        if isinstance(remove, str):
            remove = remove,
        self.remove = remove
        self.extras = extras

        if not remove and not require_features and not extras:
            # BUG FIX: the original message left the '%s' placeholder
            # unformatted; interpolate the feature description so the error
            # identifies which feature is misconfigured.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or "
                "at least one of 'packages', 'py_modules', etc."
                % (description,)
            )

    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard

    def include_in(self, dist):
        """Ensure feature and its requirements are included in distribution

        You may override this in a subclass to perform additional operations
        on the distribution.  Note that this method may be called more than
        once per feature, and so should be idempotent.
        """
        if not self.available:
            # BUG FIX: the original concatenation produced
            # "...is required,but is not available..." (missing space).
            raise DistutilsPlatformError(
                self.description + " is required, "
                "but is not available on this platform"
            )
        dist.include(**self.extras)
        for f in self.require_features:
            dist.include_feature(f)

    def exclude_from(self, dist):
        """Ensure feature is excluded from distribution

        You may override this in a subclass to perform additional operations
        on the distribution.  This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)
        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)

    def validate(self, dist):
        """Verify that feature makes sense in context of distribution

        This method is called by the distribution just before it parses its
        command line.  It checks that every name in 'remove' is actually
        present in the base distribution.  You may override it in a subclass
        to perform any other required validation of the feature against a
        target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
| {
"repo_name": "Cian47/anti_bicycle_theft",
"path": "python-api/env/lib/python3.5/site-packages/setuptools/dist.py",
"copies": "259",
"size": "35320",
"license": "mit",
"hash": -7030637806771407000,
"line_mean": 39.8796296296,
"line_max": 97,
"alpha_frac": 0.6095979615,
"autogenerated": false,
"ratio": 4.507977026164646,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Distribution']
import re
import os
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from distutils.util import rfc822_escape
from setuptools.extern import six
from setuptools.extern.six.moves import map
from pkg_resources.extern import packaging
from setuptools.depends import Require
from setuptools import windows_support
from setuptools.monkey import get_unpatched
from setuptools.config import parse_configuration
import pkg_resources
from .py36compat import Distribution_parse_config_files
def _get_unpatched(cls):
    """Deprecated shim; delegates to setuptools.monkey.get_unpatched."""
    warnings.warn("Do not call this function", DeprecationWarning)
    return get_unpatched(cls)
# Based on Python 3.5 version
def write_pkg_file(self, file):
    """Write the PKG-INFO format data to a file object."""
    # Pick the lowest metadata version that can express the given fields.
    version = '1.0'
    needs_1_1 = (
        self.provides or self.requires or self.obsoletes or
        self.classifiers or self.download_url
    )
    if needs_1_1:
        version = '1.1'
    # Setuptools specific for PEP 345
    if hasattr(self, 'python_requires'):
        version = '1.2'

    def emit(field, value):
        file.write('%s: %s\n' % (field, value))

    emit('Metadata-Version', version)
    emit('Name', self.get_name())
    emit('Version', self.get_version())
    emit('Summary', self.get_description())
    emit('Home-page', self.get_url())
    emit('Author', self.get_contact())
    emit('Author-email', self.get_contact_email())
    emit('License', self.get_license())
    if self.download_url:
        emit('Download-URL', self.download_url)
    emit('Description', rfc822_escape(self.get_long_description()))
    keywords = ','.join(self.get_keywords())
    if keywords:
        emit('Keywords', keywords)
    self._write_list(file, 'Platform', self.get_platforms())
    self._write_list(file, 'Classifier', self.get_classifiers())
    # PEP 314
    self._write_list(file, 'Requires', self.get_requires())
    self._write_list(file, 'Provides', self.get_provides())
    self._write_list(file, 'Obsoletes', self.get_obsoletes())
    # Setuptools specific for PEP 345
    if hasattr(self, 'python_requires'):
        emit('Requires-Python', self.python_requires)
# from Python 3.4
def write_pkg_info(self, base_dir):
    """Write the PKG-INFO file into the release tree."""
    pkg_info_path = os.path.join(base_dir, 'PKG-INFO')
    with open(pkg_info_path, 'w', encoding='UTF-8') as pkg_info:
        self.write_pkg_file(pkg_info)
# Attribute types that include()/exclude() know how to merge in place.
sequence = tuple, list
def check_importable(dist, attr, value):
    """Verify that *value* is an importable 'module:attrs' string."""
    ok = False
    try:
        ep = pkg_resources.EntryPoint.parse('x=' + value)
        # An entry point with extras is not a plain 'module:attrs' reference.
        ok = not ep.extras
    except (TypeError, ValueError, AttributeError):
        pass
    if not ok:
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr, value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    try:
        # A list/tuple of strings joins into a *new* string; a plain string
        # joins into an equal one, and a non-iterable raises -- both of
        # which are rejected below.
        joined = ''.join(value)
        assert joined != value
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    ns_packages = value
    assert_string_list(dist, attr, ns_packages)
    for nsp in ns_packages:
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        # A declared namespace's parent must itself be declared.
        parent, _sep, _child = nsp.rpartition('.')
        if parent and parent not in ns_packages:
            distutils.log.warn(
                "WARNING: %r is declared as a package namespace, but %r"
                " is not: please correct this in setup.py", nsp, parent
            )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for name, reqs in value.items():
            # An extras name may carry an environment marker after ':'.
            if ':' in name:
                _, marker = name.split(':', 1)
                if pkg_resources.invalid_marker(marker):
                    raise DistutilsSetupError(
                        "Invalid environment marker: " + marker)
            list(pkg_resources.parse_requirements(reqs))
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    if bool(value) == value:
        return
    tmpl = "{attr!r} must be a boolean value (got {value!r})"
    raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError) as error:
        msg = (
            "{attr!r} must be a string or list of strings "
            "containing valid project/version requirement specifiers; {error}"
        ).format(attr=attr, error=error)
        raise DistutilsSetupError(msg)
def check_specifier(dist, attr, value):
    """Verify that value is a valid version specifier"""
    try:
        packaging.specifiers.SpecifierSet(value)
    except packaging.specifiers.InvalidSpecifier as error:
        msg = (
            "{attr!r} must be a string or list of strings "
            "containing valid version specifiers; {error}"
        ).format(attr=attr, error=error)
        raise DistutilsSetupError(msg)
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as err:
        raise DistutilsSetupError(err)
def check_test_suite(dist, attr, value):
    """Verify that 'test_suite' is a string (a dotted object name)."""
    if isinstance(value, six.string_types):
        return
    raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    if isinstance(value, dict):
        valid = True
        for pkg, patterns in value.items():
            if not isinstance(pkg, str):
                valid = False
                break
            try:
                iter(patterns)
            except TypeError:
                # Patterns must at least be iterable.
                valid = False
                break
        if valid:
            return
    raise DistutilsSetupError(
        attr + " must be a dictionary mapping package names to lists of "
        "wildcard patterns"
    )
def check_packages(dist, attr, value):
    """Warn about package names that aren't dot-separated identifiers."""
    for pkgname in value:
        if re.match(r'\w+(\.\w+)*', pkgname):
            continue
        distutils.log.warn(
            "WARNING: %r not a valid package name; please use only "
            ".-separated package names in setup.py", pkgname
        )
# The distutils Distribution class as it was before setuptools patched it.
_Distribution = get_unpatched(distutils.core.Distribution)
class Distribution(Distribution_parse_config_files, _Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
    def __init__(self, attrs=None):
        """Initialize, consuming setuptools-specific keys from *attrs*
        before handing the rest to the distutils base class.
        """
        # Preserve package_data if a subclass set it before calling us.
        have_package_data = hasattr(self, "package_data")
        if not have_package_data:
            self.package_data = {}
        _attrs_dict = attrs or {}
        if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
            Feature.warn_deprecated()
        self.require_features = []
        self.features = {}
        self.dist_files = []
        # src_root is popped so the distutils base never sees it.
        self.src_root = attrs and attrs.pop("src_root", None)
        self.patch_missing_pkg_info(attrs)
        # Make sure we have any eggs needed to interpret 'attrs'
        if attrs is not None:
            self.dependency_links = attrs.pop('dependency_links', [])
            assert_string_list(self, 'dependency_links', self.dependency_links)
        if attrs and 'setup_requires' in attrs:
            # Fetch build-time requirements before _Distribution.__init__
            # processes the remaining attrs (which may need them).
            self.fetch_build_eggs(attrs['setup_requires'])
        # Pre-seed every registered setup keyword so plugins can rely on
        # the attribute existing even when the user did not pass it.
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            vars(self).setdefault(ep.name, None)
        _Distribution.__init__(self, attrs)
        if isinstance(self.metadata.version, numbers.Number):
            # Some people apparently take "version number" too literally :)
            self.metadata.version = str(self.metadata.version)
        if self.metadata.version is not None:
            try:
                # Normalize the version per PEP 440, warning on change.
                ver = packaging.version.Version(self.metadata.version)
                normalized_version = str(ver)
                if self.metadata.version != normalized_version:
                    warnings.warn(
                        "Normalizing '%s' to '%s'" % (
                            self.metadata.version,
                            normalized_version,
                        )
                    )
                    self.metadata.version = normalized_version
            except (packaging.version.InvalidVersion, TypeError):
                warnings.warn(
                    "The version specified (%r) is an invalid version, this "
                    "may not work as expected with newer versions of "
                    "setuptools, pip, and PyPI. Please see PEP 440 for more "
                    "details." % self.metadata.version
                )
        if getattr(self, 'python_requires', None):
            self.metadata.python_requires = self.python_requires
    def parse_config_files(self, filenames=None):
        """Parses configuration files from various levels
        and loads configuration.
        """
        _Distribution.parse_config_files(self, filenames=filenames)
        # Apply the setuptools-specific setup.cfg sections on top of the
        # options distutils just parsed.
        parse_configuration(self, self.command_options)
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self, name):
"""Convert feature name to corresponding option attribute name"""
return 'with_' + name.replace('-', '_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
return resolved_dists
    def finalize_options(self):
        """Finalize distutils options, then run registered setup-keyword
        validators and feature option wiring.
        """
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()
        # Each registered setup keyword gets a chance to validate its value;
        # ep.load() returns the validator, which is called immediately.
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self, ep.name, None)
            if value is not None:
                ep.require(installer=self.fetch_build_egg)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
        else:
            self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
    def fetch_build_egg(self, req):
        """Fetch an egg needed for building"""
        try:
            # Reuse a previously-configured fetcher if we have one.
            cmd = self._egg_fetcher
            cmd.package_index.to_scan = []
        except AttributeError:
            # First use: build a dedicated easy_install command that only
            # honors a safe subset of the user's [easy_install] settings.
            from setuptools.command.easy_install import easy_install
            dist = self.__class__({'script_args': ['easy_install']})
            dist.parse_config_files()
            opts = dist.get_option_dict('easy_install')
            # NOTE(review): 'site_dirs' appears twice in this tuple; one
            # entry is redundant.
            keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts'
            )
            for key in list(opts):
                if key not in keep:
                    del opts[key]  # don't use any other settings
            if self.dependency_links:
                links = self.dependency_links[:]
                if 'find_links' in opts:
                    # Merge configured find_links ahead of dependency_links.
                    links = opts['find_links'][1].split() + links
                opts['find_links'] = ('setup', links)
            install_dir = self.get_egg_cache_dir()
            cmd = easy_install(
                dist, args=["x"], install_dir=install_dir, exclude_scripts=True,
                always_copy=False, build_directory=None, editable=False,
                upgrade=False, multi_version=True, no_report=True, user=False
            )
            cmd.ensure_finalized()
            self._egg_fetcher = cmd
        return cmd.easy_install(req)
    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""
        go = []
        no = self.negative_opt.copy()
        for name, feature in self.features.items():
            # Start every feature as "unknown"; validate before exposing it.
            self._set_feature(name, None)
            feature.validate(self)
            if feature.optional:
                descr = feature.description
                incdef = ' (default)'
                excdef = ''
                if not feature.include_by_default():
                    # Swap the "(default)" marker onto the excluding option.
                    excdef, incdef = incdef, excdef
                go.append(('with-' + name, None, 'include ' + descr + incdef))
                go.append(('without-' + name, None, 'exclude ' + descr + excdef))
                no['without-' + name] = 'with-' + name
        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no
    def _finalize_features(self):
        """Add/remove features and resolve dependencies between them"""
        # First, flag all the enabled items (and thus their dependencies)
        for name, feature in self.features.items():
            enabled = self.feature_is_included(name)
            if enabled or (enabled is None and feature.include_by_default()):
                feature.include_in(self)
                self._set_feature(name, 1)
        # Then disable the rest, so that off-by-default features don't
        # get flagged as errors when they're required by an enabled feature
        for name, feature in self.features.items():
            if not self.feature_is_included(name):
                feature.exclude_from(self)
                self._set_feature(name, 0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
for ep in pkg_resources.iter_entry_points('distutils.commands', command):
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def get_command_list(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.get_command_list(self)
def _set_feature(self, name, status):
"""Set feature's inclusion status"""
setattr(self, self._feature_attrname(name), status)
def feature_is_included(self, name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self, self._feature_attrname(name))
def include_feature(self, name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name) == 0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name, 1)
def include(self, **attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k, v in attrs.items():
include = getattr(self, '_include_' + k, None)
if include:
include(v)
else:
self._include_misc(k, v)
def exclude_package(self, package):
"""Remove packages, modules, and extensions in named package"""
pfx = package + '.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self, package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package + '.'
for p in self.iter_distribution_names():
if p == package or p.startswith(pfx):
return True
def _exclude_misc(self, name, value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self, name, [item for item in old if item not in value])
def _include_misc(self, name, value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self, name, value)
elif not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
else:
setattr(self, name, old + [item for item in value if item not in old])
def exclude(self, **attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k, v)
def _exclude_packages(self, packages):
if not isinstance(packages, sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src, alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias, True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class, 'command_consumes_arguments', None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options

        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores. If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.

        Note that options provided by config files are intentionally excluded.
        """
        d = {}
        for cmd, opts in self.command_options.items():
            for opt, (src, val) in opts.items():
                if src != "command line":
                    continue
                opt = opt.replace('_', '-')
                if val == 0:
                    # Zero means a negative option (e.g. '--quiet') was
                    # given; map the positive name back to its negative
                    # spelling via the command's negative_opt table.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
                    for neg, pos in neg_opt.items():
                        if pos == opt:
                            opt = neg
                            val = None
                            break
                    else:
                        raise AssertionError("Shouldn't be able to get here")
                elif val == 1:
                    # Flag options store 1; report them as valueless.
                    val = None
                d.setdefault(cmd, {})[opt] = val
        return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext, tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.
        """
        import sys
        if six.PY2 or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)
        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)
        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)
        # Print metadata in UTF-8 no matter the platform: detach the raw
        # buffer from the current stdout wrapper and re-wrap it as UTF-8,
        # preserving the other stream settings.
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Restore the original stdout wrapper whatever happens above.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://github.com/pypa/setuptools/issues/58>`_ and will be removed in
    a future version.

    A subset of the distribution that can be excluded if unneeded/wanted

    Features are created using these keyword arguments:

    'description' -- a short, human readable description of the feature, to
    be used in error messages, and option help messages.

    'standard' -- if true, the feature is included by default if it is
    available on the current system. Otherwise, the feature is only
    included if requested via a command line '--with-X' option, or if
    another included feature requires it. The default setting is 'False'.

    'available' -- if true, the feature is available for installation on the
    current system. The default setting is 'True'.

    'optional' -- if true, the feature's inclusion can be controlled from the
    command line, using the '--with-X' or '--without-X' options. If
    false, the feature's inclusion status is determined automatically,
    based on 'availabile', 'standard', and whether any other feature
    requires it. The default setting is 'True'.

    'require_features' -- a string or sequence of strings naming features
    that should also be included if this feature is included. Defaults to
    empty list. May also contain 'Require' objects that should be
    added/removed from the distribution.

    'remove' -- a string or list of strings naming packages to be removed
    from the distribution if this feature is *not* included. If the
    feature *is* included, this argument is ignored. This argument exists
    to support removing features that "crosscut" a distribution, such as
    defining a 'tests' feature that removes all the 'tests' subpackages
    provided by other features. The default for this argument is an empty
    list. (Note: the named package(s) or modules must exist in the base
    distribution when the 'setup()' function is initially called.)

    other keywords -- any other keyword arguments are saved, and passed to
    the distribution's 'include()' and 'exclude()' methods when the
    feature is included or excluded, respectively. So, for example, you
    could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
    added or removed from the distribution as appropriate.

    A feature must include at least one 'requires', 'remove', or other
    keyword argument. Otherwise, it can't affect the distribution in any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded. See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """

    @staticmethod
    def warn_deprecated():
        warnings.warn(
            "Features are deprecated and will be removed in a future "
            "version. See https://github.com/pypa/setuptools/issues/65.",
            DeprecationWarning,
            stacklevel=3,
        )

    def __init__(self, description, standard=False, available=True,
                 optional=True, require_features=(), remove=(), **extras):
        self.warn_deprecated()

        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # Normalize a single feature name / Require to a one-element tuple.
        if isinstance(require_features, (str, Require)):
            require_features = require_features,

        # Plain strings name other features; Require objects are forwarded
        # to the distribution via the 'require_features' extra.
        self.require_features = [
            r for r in require_features if isinstance(r, str)
        ]
        er = [r for r in require_features if not isinstance(r, str)]
        if er:
            extras['require_features'] = er

        if isinstance(remove, str):
            remove = remove,
        self.remove = remove
        self.extras = extras

        if not remove and not require_features and not extras:
            # BUG FIX: the '%s' placeholder was previously never filled in,
            # so the error message contained a literal '%s'.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or at least one"
                " of 'packages', 'py_modules', etc." % self.description
            )

    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard

    def include_in(self, dist):
        """Ensure feature and its requirements are included in distribution

        You may override this in a subclass to perform additional operations on
        the distribution. Note that this method may be called more than once
        per feature, and so should be idempotent.
        """
        if not self.available:
            raise DistutilsPlatformError(
                self.description + " is required, "
                "but is not available on this platform"
            )
        dist.include(**self.extras)

        for f in self.require_features:
            dist.include_feature(f)

    def exclude_from(self, dist):
        """Ensure feature is excluded from distribution

        You may override this in a subclass to perform additional operations on
        the distribution. This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)

        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)

    def validate(self, dist):
        """Verify that feature makes sense in context of distribution

        This method is called by the distribution just before it parses its
        command line. It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called. You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
| {
"repo_name": "AccelAI/accel.ai",
"path": "flask-aws/lib/python2.7/site-packages/setuptools/dist.py",
"copies": "18",
"size": "37548",
"license": "mit",
"hash": 8119696690248872000,
"line_mean": 39.5924324324,
"line_max": 97,
"alpha_frac": 0.6097528497,
"autogenerated": false,
"ratio": 4.440397350993377,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016178722458335686,
"num_lines": 925
} |
__all__ = ['Distribution']
import re
import os
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
import itertools
from collections import defaultdict
from distutils.errors import (
DistutilsOptionError, DistutilsPlatformError, DistutilsSetupError,
)
from distutils.util import rfc822_escape
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter, filterfalse
from pkg_resources.extern import packaging
from setuptools.depends import Require
from setuptools import windows_support
from setuptools.monkey import get_unpatched
from setuptools.config import parse_configuration
import pkg_resources
from .py36compat import Distribution_parse_config_files
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.version')
def _get_unpatched(cls):
    # Backward-compatibility shim; external code should not call this.
    warnings.warn("Do not call this function", DeprecationWarning)
    return get_unpatched(cls)
# Based on Python 3.5 version; monkeypatched onto DistributionMetadata to
# emit setuptools-specific fields (e.g. Requires-Python).
def write_pkg_file(self, file):
    """Write the PKG-INFO format data to a file object.
    """
    # Pick the metadata version: 1.1 when any PEP 314 field is present,
    # 1.2 when the setuptools-specific python_requires is set.
    version = '1.0'
    has_1_1_fields = (
        self.provides or self.requires or self.obsoletes or
        self.classifiers or self.download_url
    )
    if has_1_1_fields:
        version = '1.1'
    # Setuptools specific for PEP 345
    if hasattr(self, 'python_requires'):
        version = '1.2'

    write = file.write
    write('Metadata-Version: %s\n' % version)
    write('Name: %s\n' % self.get_name())
    write('Version: %s\n' % self.get_version())
    write('Summary: %s\n' % self.get_description())
    write('Home-page: %s\n' % self.get_url())
    write('Author: %s\n' % self.get_contact())
    write('Author-email: %s\n' % self.get_contact_email())
    write('License: %s\n' % self.get_license())
    if self.download_url:
        write('Download-URL: %s\n' % self.download_url)

    long_desc = rfc822_escape(self.get_long_description())
    write('Description: %s\n' % long_desc)

    keywords = ','.join(self.get_keywords())
    if keywords:
        write('Keywords: %s\n' % keywords)

    self._write_list(file, 'Platform', self.get_platforms())
    self._write_list(file, 'Classifier', self.get_classifiers())

    # PEP 314
    self._write_list(file, 'Requires', self.get_requires())
    self._write_list(file, 'Provides', self.get_provides())
    self._write_list(file, 'Obsoletes', self.get_obsoletes())

    # Setuptools specific for PEP 345
    if hasattr(self, 'python_requires'):
        write('Requires-Python: %s\n' % self.python_requires)
# from Python 3.4; monkeypatched in to force UTF-8 output on every platform.
def write_pkg_info(self, base_dir):
    """Write the PKG-INFO file into the release tree.
    """
    target = os.path.join(base_dir, 'PKG-INFO')
    with open(target, 'w', encoding='UTF-8') as pkg_info:
        self.write_pkg_file(pkg_info)
# The sequence types accepted by the generic include/exclude machinery.
sequence = (tuple, list)
def check_importable(dist, attr, value):
    """Validate that *value* is an importable 'module:attrs' entry-point
    string without extras; raise DistutilsSetupError otherwise."""
    try:
        entry_point = pkg_resources.EntryPoint.parse('x=' + value)
        # Extras are not meaningful here; the assert funnels into the
        # same error path as a malformed string.
        assert not entry_point.extras
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr, value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    # A genuine list/tuple of strings joins into a *different* object,
    # whereas a plain string joins back into an equal string.
    ok = True
    try:
        ok = ''.join(value) != value
    except (TypeError, ValueError, AttributeError):
        ok = False
    if not ok:
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    assert_string_list(dist, attr, value)
    for nsp in value:
        # Each namespace package must actually ship something.
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        # A nested namespace package should have its parent declared too;
        # warn (don't fail) when it isn't.
        parent, _sep, _child = nsp.rpartition('.')
        if parent and parent not in value:
            distutils.log.warn(
                "WARNING: %r is declared as a package namespace, but %r"
                " is not: please correct this in setup.py", nsp, parent
            )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for extra, reqs in value.items():
            _check_extra(extra, reqs)
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def _check_extra(extra, reqs):
    """Validate one extras_require entry: a 'name[:marker]' key and its
    requirement list."""
    _name, _sep, marker = extra.partition(':')
    if marker and pkg_resources.invalid_marker(marker):
        raise DistutilsSetupError("Invalid environment marker: " + marker)
    # Parsing raises if any requirement specifier is malformed.
    list(pkg_resources.parse_requirements(reqs))
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    # A value is boolean-like exactly when round-tripping through bool()
    # leaves it equal to itself.
    if bool(value) == value:
        return
    raise DistutilsSetupError(
        "{attr!r} must be a boolean value (got {value!r})".format(
            attr=attr, value=value)
    )
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        # Fully consume the parser so every specifier gets validated.
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError) as error:
        raise DistutilsSetupError(
            "{attr!r} must be a string or list of strings "
            "containing valid project/version requirement specifiers; "
            "{error}".format(attr=attr, error=error)
        )
def check_specifier(dist, attr, value):
    """Verify that value is a valid version specifier"""
    try:
        packaging.specifiers.SpecifierSet(value)
    except packaging.specifiers.InvalidSpecifier as error:
        raise DistutilsSetupError(
            "{attr!r} must be a string "
            "containing valid version specifiers; {error}".format(
                attr=attr, error=error)
        )
def check_entry_points(dist, attr, value):
    """Verify that the entry_points mapping can be parsed."""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as err:
        # Surface the parse failure as a setup-configuration error.
        raise DistutilsSetupError(err)
def check_test_suite(dist, attr, value):
    """Verify that 'test_suite' is a string (a dotted test-suite name)."""
    if not isinstance(value, six.string_types):
        raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    if isinstance(value, dict):
        valid = True
        for package, patterns in value.items():
            if not isinstance(package, str):
                valid = False
                break
            try:
                # Any iterable of patterns is acceptable.
                iter(patterns)
            except TypeError:
                valid = False
                break
        if valid:
            return
    raise DistutilsSetupError(
        attr + " must be a dictionary mapping package names to lists of "
        "wildcard patterns"
    )
def check_packages(dist, attr, value):
    """Warn about any package name that isn't a dotted identifier."""
    for pkgname in value:
        if re.match(r'\w+(\.\w+)*', pkgname):
            continue
        distutils.log.warn(
            "WARNING: %r not a valid package name; please use only "
            ".-separated package names in setup.py", pkgname
        )
# Grab distutils' Distribution class as it was before setuptools'
# monkey-patching, so the subclass below extends the pristine base.
_Distribution = get_unpatched(distutils.core.Distribution)
class Distribution(Distribution_parse_config_files, _Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
    def __init__(self, attrs=None):
        """Construct the distribution from a setup() keyword dict.

        Pops setuptools-specific keys out of 'attrs' (src_root,
        dependency_links, setup_requires) before delegating the rest to
        the distutils base class, then normalizes the version per PEP 440.
        """
        have_package_data = hasattr(self, "package_data")
        if not have_package_data:
            self.package_data = {}
        _attrs_dict = attrs or {}
        if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
            # The Feature machinery is deprecated; warn as early as possible.
            Feature.warn_deprecated()
        self.require_features = []
        self.features = {}
        self.dist_files = []
        self.src_root = attrs and attrs.pop("src_root", None)
        self.patch_missing_pkg_info(attrs)
        # Make sure we have any eggs needed to interpret 'attrs'
        if attrs is not None:
            self.dependency_links = attrs.pop('dependency_links', [])
            assert_string_list(self, 'dependency_links', self.dependency_links)
        if attrs and 'setup_requires' in attrs:
            # Build-time requirements must be importable before setup()
            # keywords provided by them can be processed.
            self.fetch_build_eggs(attrs['setup_requires'])
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            # Pre-create an attribute slot for every registered setup()
            # keyword so the distutils base doesn't reject it as unknown.
            vars(self).setdefault(ep.name, None)
        _Distribution.__init__(self, attrs)
        if isinstance(self.metadata.version, numbers.Number):
            # Some people apparently take "version number" too literally :)
            self.metadata.version = str(self.metadata.version)
        if self.metadata.version is not None:
            try:
                ver = packaging.version.Version(self.metadata.version)
                normalized_version = str(ver)
                if self.metadata.version != normalized_version:
                    warnings.warn(
                        "Normalizing '%s' to '%s'" % (
                            self.metadata.version,
                            normalized_version,
                        )
                    )
                self.metadata.version = normalized_version
            except (packaging.version.InvalidVersion, TypeError):
                warnings.warn(
                    "The version specified (%r) is an invalid version, this "
                    "may not work as expected with newer versions of "
                    "setuptools, pip, and PyPI. Please see PEP 440 for more "
                    "details." % self.metadata.version
                )
        self._finalize_requires()
def _finalize_requires(self):
"""
Set `metadata.python_requires` and fix environment markers
in `install_requires` and `extras_require`.
"""
if getattr(self, 'python_requires', None):
self.metadata.python_requires = self.python_requires
self._convert_extras_requirements()
self._move_install_requirements_markers()
def _convert_extras_requirements(self):
"""
Convert requirements in `extras_require` of the form
`"extra": ["barbazquux; {marker}"]` to
`"extra:{marker}": ["barbazquux"]`.
"""
spec_ext_reqs = getattr(self, 'extras_require', None) or {}
self._tmp_extras_require = defaultdict(list)
for section, v in spec_ext_reqs.items():
# Do not strip empty sections.
self._tmp_extras_require[section]
for r in pkg_resources.parse_requirements(v):
suffix = self._suffix_for(r)
self._tmp_extras_require[section + suffix].append(r)
@staticmethod
def _suffix_for(req):
"""
For a requirement, return the 'extras_require' suffix for
that requirement.
"""
return ':' + str(req.marker) if req.marker else ''
def _move_install_requirements_markers(self):
"""
Move requirements in `install_requires` that are using environment
markers `extras_require`.
"""
# divide the install_requires into two sets, simple ones still
# handled by install_requires and more complex ones handled
# by extras_require.
def is_simple_req(req):
return not req.marker
spec_inst_reqs = getattr(self, 'install_requires', None) or ()
inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))
simple_reqs = filter(is_simple_req, inst_reqs)
complex_reqs = filterfalse(is_simple_req, inst_reqs)
self.install_requires = list(map(str, simple_reqs))
for r in complex_reqs:
self._tmp_extras_require[':' + str(r.marker)].append(r)
self.extras_require = dict(
(k, [str(r) for r in map(self._clean_req, v)])
for k, v in self._tmp_extras_require.items()
)
    def _clean_req(self, req):
        """
        Given a Requirement, remove environment markers and return it.
        """
        # NOTE: mutates the passed-in requirement in place.
        req.marker = None
        return req
    def parse_config_files(self, filenames=None):
        """Parses configuration files from various levels
        and loads configuration.
        """
        # Let distutils read setup.cfg & friends first, then apply the
        # setuptools declarative config, and re-resolve requirement markers
        # that the new configuration may have introduced.
        _Distribution.parse_config_files(self, filenames=filenames)
        parse_configuration(self, self.command_options)
        self._finalize_requires()
    def parse_command_line(self):
        """Process features after parsing command line options"""
        result = _Distribution.parse_command_line(self)
        if self.features:
            # Resolve feature inclusion/exclusion now that any --with-X /
            # --without-X flags have been parsed.
            self._finalize_features()
        return result
def _feature_attrname(self, name):
"""Convert feature name to corresponding option attribute name"""
return 'with_' + name.replace('-', '_')
    def fetch_build_eggs(self, requires):
        """Resolve pre-setup requirements"""
        # Download/resolve eggs needed before setup() can proceed, then make
        # them importable by adding them to the active working set.
        resolved_dists = pkg_resources.working_set.resolve(
            pkg_resources.parse_requirements(requires),
            installer=self.fetch_build_egg,
            replace_conflicting=True,
        )
        for dist in resolved_dists:
            pkg_resources.working_set.add(dist, replace=True)
        return resolved_dists
    def finalize_options(self):
        """Finalize options, honoring features and setup-keyword plugins."""
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()

        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self, ep.name, None)
            if value is not None:
                # Each registered third-party setup() keyword validates its
                # own value via its entry-point callable.
                ep.require(installer=self.fetch_build_egg)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [
                os.path.abspath(p)
                for p in self.convert_2to3_doctests
            ]
        else:
            self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
    def fetch_build_egg(self, req):
        """Fetch an egg needed for building"""
        try:
            # Reuse a lazily-created easy_install command between calls.
            cmd = self._egg_fetcher
            cmd.package_index.to_scan = []
        except AttributeError:
            from setuptools.command.easy_install import easy_install
            # Build a throwaway Distribution purely to drive easy_install
            # with this project's configuration.
            dist = self.__class__({'script_args': ['easy_install']})
            dist.parse_config_files()
            opts = dist.get_option_dict('easy_install')
            keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts'
            )
            for key in list(opts):
                if key not in keep:
                    del opts[key]  # don't use any other settings
            if self.dependency_links:
                links = self.dependency_links[:]
                if 'find_links' in opts:
                    # Merge configured find_links with dependency_links.
                    links = opts['find_links'][1].split() + links
                opts['find_links'] = ('setup', links)
            install_dir = self.get_egg_cache_dir()
            cmd = easy_install(
                dist, args=["x"], install_dir=install_dir,
                exclude_scripts=True,
                always_copy=False, build_directory=None, editable=False,
                upgrade=False, multi_version=True, no_report=True, user=False
            )
            cmd.ensure_finalized()
            self._egg_fetcher = cmd
        return cmd.easy_install(req)
    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""
        go = []  # new global option table entries
        no = self.negative_opt.copy()  # negative-option aliases to extend
        for name, feature in self.features.items():
            self._set_feature(name, None)
            feature.validate(self)
            if feature.optional:
                descr = feature.description
                incdef = ' (default)'
                excdef = ''
                if not feature.include_by_default():
                    # Swap which spelling gets labelled "(default)".
                    excdef, incdef = incdef, excdef
                new = (
                    ('with-' + name, None, 'include ' + descr + incdef),
                    ('without-' + name, None, 'exclude ' + descr + excdef),
                )
                go.extend(new)
                no['without-' + name] = 'with-' + name
        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name, feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name, 1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name, feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name, 0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
eps = pkg_resources.iter_entry_points('distutils.commands', command)
for ep in eps:
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def get_command_list(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.get_command_list(self)
    def _set_feature(self, name, status):
        """Set feature's inclusion status"""
        # status: 1 = included, 0 = excluded, None = undecided.
        setattr(self, self._feature_attrname(name), status)
    def feature_is_included(self, name):
        """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
        # Reads the 'with_<name>' attribute maintained by _set_feature().
        return getattr(self, self._feature_attrname(name))
    def include_feature(self, name):
        """Request inclusion of feature named 'name'"""
        if self.feature_is_included(name) == 0:
            # Status 0 means the user explicitly excluded it: hard conflict.
            descr = self.features[name].description
            raise DistutilsOptionError(
                descr + " is required, but was excluded or is not available"
            )
        self.features[name].include_in(self)
        self._set_feature(name, 1)
def include(self, **attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k, v in attrs.items():
include = getattr(self, '_include_' + k, None)
if include:
include(v)
else:
self._include_misc(k, v)
def exclude_package(self, package):
"""Remove packages, modules, and extensions in named package"""
pfx = package + '.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self, package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package + '.'
for p in self.iter_distribution_names():
if p == package or p.startswith(pfx):
return True
def _exclude_misc(self, name, value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self, name, [item for item in old if item not in value])
def _include_misc(self, name, value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self, name, value)
elif not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
else:
new = [item for item in value if item not in old]
setattr(self, name, old + new)
def exclude(self, **attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k, v)
def _exclude_packages(self, packages):
if not isinstance(packages, sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
    def _parse_command_opts(self, parser, args):
        """Expand command aliases, then defer to distutils option parsing.

        Returns the remaining argument list, or [] when the named command
        consumes all remaining arguments itself.
        """
        # Remove --with-X/--without-X options when processing command args
        self.global_options = self.__class__.global_options
        self.negative_opt = self.__class__.negative_opt
        # First, expand any aliases
        command = args[0]
        aliases = self.get_option_dict('aliases')
        while command in aliases:
            src, alias = aliases[command]
            del aliases[command]  # ensure each alias can expand only once!
            import shlex
            # Splice the alias expansion in place of the command token.
            args[:1] = shlex.split(alias, True)
            command = args[0]
        nargs = _Distribution._parse_command_opts(self, parser, args)
        # Handle commands that want to consume all remaining arguments
        cmd_class = self.get_command_class(command)
        if getattr(cmd_class, 'command_consumes_arguments', None):
            # Stash the leftovers in the command's option dict instead of
            # letting distutils parse them as further commands.
            self.get_option_dict(command)['args'] = ("command line", nargs)
            if nargs is not None:
                return []
        return nargs
    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options
        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores. If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.
        Note that options provided by config files are intentionally excluded.
        """
        d = {}
        for cmd, opts in self.command_options.items():
            for opt, (src, val) in opts.items():
                # Skip options that came from config files, not the CLI.
                if src != "command line":
                    continue
                opt = opt.replace('_', '-')
                if val == 0:
                    # val == 0 means a negative form (e.g. '--no-X') was
                    # given; map it back to the negative option's name.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
                    for neg, pos in neg_opt.items():
                        if pos == opt:
                            opt = neg
                            val = None
                            break
                    else:
                        raise AssertionError("Shouldn't be able to get here")
                elif val == 1:
                    # A bare flag (no argument) is represented as None.
                    val = None
                d.setdefault(cmd, {})[opt] = val
        return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext, tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.
        """
        import sys
        if six.PY2 or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)
        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)
        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)
        # Print metadata in UTF-8 no matter the platform: temporarily rewrap
        # stdout around the same underlying buffer with a UTF-8 encoder,
        # remembering the original settings so they can be restored below.
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Restore the original encoding even if the display call raised.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://github.com/pypa/setuptools/issues/58>`_ and will be removed in
    a future version.
    A subset of the distribution that can be excluded if unneeded/wanted
    Features are created using these keyword arguments:
    'description' -- a short, human readable description of the feature, to
    be used in error messages, and option help messages.
    'standard' -- if true, the feature is included by default if it is
    available on the current system. Otherwise, the feature is only
    included if requested via a command line '--with-X' option, or if
    another included feature requires it. The default setting is 'False'.
    'available' -- if true, the feature is available for installation on the
    current system. The default setting is 'True'.
    'optional' -- if true, the feature's inclusion can be controlled from the
    command line, using the '--with-X' or '--without-X' options. If
    false, the feature's inclusion status is determined automatically,
    based on 'available', 'standard', and whether any other feature
    requires it. The default setting is 'True'.
    'require_features' -- a string or sequence of strings naming features
    that should also be included if this feature is included. Defaults to
    empty list. May also contain 'Require' objects that should be
    added/removed from the distribution.
    'remove' -- a string or list of strings naming packages to be removed
    from the distribution if this feature is *not* included. If the
    feature *is* included, this argument is ignored. This argument exists
    to support removing features that "crosscut" a distribution, such as
    defining a 'tests' feature that removes all the 'tests' subpackages
    provided by other features. The default for this argument is an empty
    list. (Note: the named package(s) or modules must exist in the base
    distribution when the 'setup()' function is initially called.)
    other keywords -- any other keyword arguments are saved, and passed to
    the distribution's 'include()' and 'exclude()' methods when the
    feature is included or excluded, respectively. So, for example, you
    could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
    added or removed from the distribution as appropriate.
    A feature must include at least one 'require_features', 'remove', or
    other keyword argument. Otherwise, it can't affect the distribution in
    any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded. See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """
    @staticmethod
    def warn_deprecated():
        msg = (
            "Features are deprecated and will be removed in a future "
            "version. See https://github.com/pypa/setuptools/issues/65."
        )
        warnings.warn(msg, DeprecationWarning, stacklevel=3)
    def __init__(
            self, description, standard=False, available=True,
            optional=True, require_features=(), remove=(), **extras):
        self.warn_deprecated()
        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # Normalize a single feature name / Require object to a 1-tuple.
        if isinstance(require_features, (str, Require)):
            require_features = require_features,
        # Plain strings name other features; Require objects are forwarded
        # to the distribution via the 'require_features' extra instead.
        self.require_features = [
            r for r in require_features if isinstance(r, str)
        ]
        er = [r for r in require_features if not isinstance(r, str)]
        if er:
            extras['require_features'] = er
        if isinstance(remove, str):
            remove = remove,
        self.remove = remove
        self.extras = extras
        if not remove and not require_features and not extras:
            # Fix: the message contains a '%s' placeholder but the '%'
            # operand was never applied, so users saw a literal '%s'.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or "
                "at least one of 'packages', 'py_modules', etc."
                % (self.description,)
            )
    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard
    def include_in(self, dist):
        """Ensure feature and its requirements are included in distribution
        You may override this in a subclass to perform additional operations on
        the distribution. Note that this method may be called more than once
        per feature, and so should be idempotent.
        """
        if not self.available:
            raise DistutilsPlatformError(
                self.description + " is required, "
                "but is not available on this platform"
            )
        dist.include(**self.extras)
        for f in self.require_features:
            dist.include_feature(f)
    def exclude_from(self, dist):
        """Ensure feature is excluded from distribution
        You may override this in a subclass to perform additional operations on
        the distribution. This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)
        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)
    def validate(self, dist):
        """Verify that feature makes sense in context of distribution
        This method is called by the distribution just before it parses its
        command line. It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called. You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
| {
"repo_name": "st135yle/django-site",
"path": "dbenv/lib/python3.4/site-packages/setuptools/dist.py",
"copies": "15",
"size": "40304",
"license": "mit",
"hash": 3582401093393932000,
"line_mean": 38.9841269841,
"line_max": 79,
"alpha_frac": 0.6084259627,
"autogenerated": false,
"ratio": 4.422692856359047,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 1008
} |
__all__ = ['DockerManager']
import logging
from .api import APIClient
logger = logging.getLogger(__name__) # type: logging.Logger
logger.setLevel(logging.DEBUG)
class DockerManager(object):
    """
    Provides access to the underlying Docker server used by a BugZoo server.
    """
    def __init__(self, api: APIClient) -> None:
        # Low-level HTTP client used for all requests to the server.
        self.__api = api
    def has_image(self, name: str) -> bool:
        """
        Determines whether the server has a Docker image with a given name.
        """
        path = "docker/images/{}".format(name)
        response = self.__api.head(path)
        if response.status_code == 204:
            return True
        if response.status_code == 404:
            return False
        # Any other status is unexpected; delegate to the shared handler.
        self.__api.handle_erroneous_response(response)
    def delete_image(self, name: str) -> None:
        """
        Deletes a Docker image with a given name.
        Parameters:
            name: the name of the Docker image.
        """
        logger.debug("deleting Docker image: %s", name)
        path = "docker/images/{}".format(name)
        response = self.__api.delete(path)
        if response.status_code != 204:
            try:
                self.__api.handle_erroneous_response(response)
            except Exception:
                logger.exception("failed to delete Docker image: %s", name)
                raise
            logger.info("deleted Docker image: %s", name)
| {
"repo_name": "ChrisTimperley/AutomatedRepairBenchmarks.c",
"path": "bugzoo/client/dockerm.py",
"copies": "1",
"size": "1410",
"license": "mit",
"hash": -1211700045628832300,
"line_mean": 29,
"line_max": 76,
"alpha_frac": 0.5680851064,
"autogenerated": false,
"ratio": 4.12280701754386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 47
} |
__all__ = ('Document',)
from pyPdf import PdfFileWriter, PdfFileReader
from .fields import BaseField
class Document(object):
    """Renders a set of field pages on top of an origin PDF template.

    Subclasses supply 'pages' (page classes rendered per instance) and an
    'origin' path (or override get_origin()).
    """
    pages = []
    origin = None
    def __init__(self, instance):
        # The model/object whose data the page classes render.
        self.instance = instance
    def get_origin(self):
        """Return the path of the origin (template) PDF."""
        return self.origin
    def save(self, to):
        """Merge every rendered page onto the origin PDF and write it to 'to'.

        'to' may be a filename or an already-open writable file object.
        Note: a caller-provided file object is closed after writing, matching
        the historical behaviour of this method.
        """
        origin = self.get_origin()
        if not origin:
            raise RuntimeError("Please implement get_origin method or origin attribute")
        try:
            origin_file = file(origin, "rb")
        except IOError:
            raise RuntimeError(u"Failed to open origin file")
        try:
            existing_pdf = PdfFileReader(origin_file)
            output = PdfFileWriter()
            # NOTE(review): every page is merged onto page 0 of the origin —
            # confirm this is intended for multi-page templates.
            for page_id, page_class in enumerate(self.pages):
                new_page = page_class(self.instance).save()
                base_page = existing_pdf.getPage(0)
                base_page.mergePage(new_page)
                output.addPage(base_page)
            if isinstance(to, basestring):
                outputStream = file(to, "wb")
            else:
                outputStream = to
            output.write(outputStream)
            outputStream.close()
        finally:
            # Fix: the origin file handle was previously never closed (leak).
            origin_file.close()
| {
"repo_name": "wpjunior/pdforms",
"path": "pdforms/document.py",
"copies": "1",
"size": "1139",
"license": "mit",
"hash": -8873073773400031000,
"line_mean": 24.8863636364,
"line_max": 88,
"alpha_frac": 0.5654082529,
"autogenerated": false,
"ratio": 4.397683397683398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01891245559146798,
"num_lines": 44
} |
__all__=["doExtraction", "ExtrProgress"]
import typing
import clr
clr.AddReference("LessMsi.core")
clr.AddReference("LessIO")
import LessIO
from LessMsi.Msi import Wixtracts
from pathlib import Path
__license__="MIT"
__copyright__=r"""
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Copyright (c) 2004 Scott Willeke (http://scott.willeke.com)
Authors:
Scott Willeke (scott@willeke.com)
"""
class ExtrProgress:
    """Represents progress of unpacking"""
    __slots__ = ("current", "total", "fileName")
    def __init__(self, current: int, total: int, fileName: Path):
        # Files extracted so far, total expected, and the file in flight.
        self.current = current
        self.total = total
        self.fileName = fileName
    def __repr__(self):
        fields = (str(self.current), str(self.total), str(self.fileName))
        return "{}({})".format(type(self).__name__, ", ".join(fields))
from time import sleep
def doExtraction(msiFileName: Path, outDirName: Path = "", filesToExtract: typing.Iterable[Path] = None, progressCallback=None):
    """Extracts files from a .msi.

    msiFileName: the .msi archive to unpack.
    outDirName: destination directory (defaults to the current directory).
    filesToExtract: optional subset of files to extract; None means all.
    progressCallback: optional callable receiving an ExtrProgress per file.
    See https://github.com/activescott/lessmsi/blob/master/src/LessMsi.Cli/Program.cs#L104 for more info
    """
    msiFileName = str(Path(msiFileName).absolute())
    outDirName = str(Path(outDirName).absolute())
    if filesToExtract:
        # Fix: this previously converted `outDirName` instead of each file
        # path `f`, producing the same (wrong) entry for every requested
        # file. Also materialize as a list so the .NET call can enumerate it
        # safely.
        filesToExtract = [str(Path(f).absolute()) for f in filesToExtract]
    msiFile = LessIO.Path(msiFileName)
    if progressCallback:
        def cb(progress: Wixtracts.ExtractionProgress):
            # progress.Activity is also available but unused here.
            return progressCallback(ExtrProgress(progress.FilesExtractedSoFar, progress.TotalFileCount, Path(progress.CurrentFileName)))
        cb = clr.System.AsyncCallback(cb)
    else:
        cb = None
    Wixtracts.ExtractFiles(msiFile, outDirName, filesToExtract, cb)
try:
    from tqdm import tqdm
    def doExtractionWithTqdmProgressBar(msiFileName: Path, outDirName: Path = "", filesToExtract: typing.Iterable[Path] = None, progressCallback=None):
        """Extracts files from a .msi showing a tqdm-based progressbar."""
        done_so_far = 0
        with tqdm(unit="file") as bar:
            def on_progress(progr: ExtrProgress):
                # Feed per-file progress into the bar, then forward to any
                # user-supplied callback.
                nonlocal done_so_far
                bar.desc = str(progr.fileName)
                step = progr.current - done_so_far
                done_so_far = progr.current
                bar.total = progr.total
                bar.update(step)
                if progressCallback:
                    return progressCallback(progr)
            doExtraction(msiFileName, outDirName=outDirName, filesToExtract=filesToExtract, progressCallback=on_progress)
    __all__.append(doExtractionWithTqdmProgressBar.__name__)
except ImportError:
    # tqdm is optional; without it only the plain doExtraction is exported.
    pass
| {
"repo_name": "activescott/lessmsi",
"path": "contrib/python/LessMsi.py",
"copies": "1",
"size": "3294",
"license": "mit",
"hash": 6045208784910004000,
"line_mean": 44.75,
"line_max": 460,
"alpha_frac": 0.7680631451,
"autogenerated": false,
"ratio": 3.4276795005202914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4695742645620291,
"avg_score": null,
"num_lines": null
} |
__all__ = [ "Domain", "Peer", "Database", "Process" ]
""" This module provides basic "entity" support, similar to what's available
in the com.nuodb.entity Java package. A Domain instance provides entry into
a domain, and optionally a hook for getting called back when domain-level
events happen. The Domain provides access to Peers, Databases and Processes.
To create a Domain 'connection' you need to give a broker address (a string
which may end in ':PORT') and domain password. You can also supply a class
to notify on domain events. That class may implement any of the methods:
peer_joined(self, peer)
peer_left(self, peer)
process_joined(self, process)
process_left(self, process)
process_failed(self, peer, reason)
process_status_changed(self, process, status)
database_joined(self, database)
database_left(self, database)
closed(self)
For instance, a valid listener could be formed like this:
class MyListener():
def database_joined(self, database):
pass
def closed(self):
pass
It would then get used when joining a domain:
domain = Domain('localhost:48004', 'admin', 'bird', MyListener())
When finished with a Domain, all users must call disconnect() to ensure that
connections are closed and that the listening thread shuts down.
TODO: This class doesn't handle entry broker failure by trying to connect
to another broker. Either this should be added, or some clear exception
should be raised to help the caller make this happen.
"""
from .session import BaseListener, Session, SessionMonitor, SessionException
from .util import DatabaseAction, startProcess, killProcess, doDatabaseAction, queryEngine
import time, json, socket
from threading import Event, Lock
import xml.etree.ElementTree as ElementTree
class Domain(BaseListener):
    """Represents the NuoDB domain.
    The domain is the top level NuoDB management object. The domain object
    provides access to the peers and databases that are contained within.
    """
    def __init__(self, broker_addr, domain_user, domain_pwd, listener=None):
        """
        :type broker_addr str
        :type domain_user str
        :type domain_pwd str
        :type listener
        """
        if not domain_pwd:
            raise Exception("A password is required to join a domain")
        # Management session to the entry broker; credentials are kept so
        # further per-service sessions can be authorized on demand.
        self.__session = Session(broker_addr, service="Monitor")
        self.__session.authorize(domain_user, domain_pwd)
        self.__user = domain_user
        self.__password = domain_pwd
        self.__listener = listener
        self.__peers = dict()
        """ :type : dict[str, Peer] """
        self.__peers_by_addr = dict()
        """ :type : dict[str, Peer] """
        self.__databases = dict()
        """ :type : dict[str, Database] """
        self.__monitor = SessionMonitor(self.__session, self)
        # These will be set in handle status after joining the domain
        self.__domain_name = None
        """ :type : str """
        self.__entry_peer = None
        """ :type : Peer """
        try:
            self.__session.doConnect()
            self.__handle_status(self.__session.recv())
        except Exception:
            # Joining failed: tear down the monitor before propagating.
            self.__monitor.close()
            raise
        self.__monitor.start()
    def __str__(self):
        return self.domain_name + " [Entered through: " + self.entry_peer.connect_str + "]"
    def disconnect(self):
        """Disconnect from the domain."""
        self.__monitor.close()
    def _send_domain_message(self, service, attributes=None, text=None, children=None):
        # Opens a fresh, authorized session to the entry peer for a single
        # request/response exchange.
        session = Session(self.__entry_peer.address, port=self.__entry_peer.port, service=service)
        session.authorize(self.__user, self.__password)
        return session.doRequest(attributes, text, children)
    @property
    def user(self):
        """Return the domain user."""
        return self.__user
    @property
    def password(self):
        """Return the domain password."""
        return self.__password
    @property
    def domain_name(self):
        """Return the domain name."""
        return self.__domain_name
    def find_peer(self, address, port=None):
        """
        Find a peer by address
        :type: address str
        :type: port int or str
        :rtype: Peer
        """
        if port is None:
            if ":" in address:
                address, port = address.split(':', 2)
            else:
                # No explicit port anywhere; assume the entry peer's port.
                port = self.__entry_peer.port
        else:
            if ":" in address:
                address, _ = address.split(':', 2)
        # Normalize to "ip:port" for the address-keyed cache lookup.
        ip = socket.gethostbyname(address)
        inet_sock_addr = ":".join([ip, str(port)])
        try:
            return self.__peers_by_addr[inet_sock_addr]
        except Exception as exception:
            # NOTE(review): 'exception.message' exists only on Python 2 —
            # confirm the target interpreter.
            print(exception.message)
        # Cache miss: ask the host directly for its identity and match it
        # against peers already known to this domain.
        session = Session(address, port=port, service="Identity")
        session.authorize(self.__user, self.__password)
        response = session.doRequest()
        try:
            root = ElementTree.fromstring(response)
            if self.__domain_name != root.get("Domain"):
                return None
            peer = self.get_peer(root.get("AgentId"))
            if peer:
                peer._get_normalized_addr()
                self.__peers_by_addr[peer._get_normalized_addr()] = peer
            return peer
        except Exception as exception:
            print(exception.message)
        return None
    def get_peer(self, agent_id):
        """
        Return a peer for a given agent_id.
        :type agent_id str
        :rtype: Peer
        """
        return self.__peers.get(agent_id)
    @property
    def peers(self):
        """
        Return a list of all peers in the domain.
        :rtype: list[Peer]
        """
        return self.__peers.values()
    @property
    def entry_peer(self):
        """
        Return the peer that was used to enter the domain.
        :rtype: Peer
        """
        return self.__entry_peer
    def get_database(self, name):
        """
        Return a database by name
        :type name str
        :rtype: Database
        """
        return self.__databases.get(name)
    @property
    def databases(self):
        """
        Return a list of databases in the domain
        :rtype: list[Database]
        """
        return self.__databases.values()
    def create_template(self, template_name, summary, requirements):
        """Create template by name"""
        response = self._send_domain_message(**Template.build_create_request(template_name, summary, requirements))
        return ElementTree.fromstring(response).tag == Template.success_message
    def update_template(self, template_name, summary, requirements):
        """Update template by name"""
        response = self._send_domain_message(**Template.build_update_request(template_name, summary, requirements))
        return ElementTree.fromstring(response).tag == Template.success_message
    def delete_template(self, template_name):
        """Delete template by name"""
        response = self._send_domain_message(**Template.build_delete_request(template_name))
        return ElementTree.fromstring(response).tag == Template.success_message
    def get_template(self, template_name):
        """Return a template by name"""
        response = self._send_domain_message(**Template.build_get_request(template_name))
        return Template.from_message(response)
    @property
    def templates(self):
        """Return a list of templates in the domain"""
        response = self._send_domain_message(**Template.build_list_request())
        return Template.from_list_message(response)
    def create_description(self, name, template_name, variables, dba_user, dba_password):
        # Returns True when the server acknowledges the creation.
        response = self._send_domain_message(**Description.build_create_request(name, template_name, variables, dba_user, dba_password))
        return ElementTree.fromstring(response).tag == Description.success_message
    def update_description(self, name, template_name, variables):
        response = self._send_domain_message(**Description.build_update_request(name, template_name, variables))
        return ElementTree.fromstring(response).tag == Description.success_message
    def delete_description(self, name):
        response = self._send_domain_message(**Description.build_delete_request(name))
        return ElementTree.fromstring(response).tag == Description.success_message
    def get_description(self, name):
        response = self._send_domain_message(**Description.build_get_request(name))
        return Description.from_message(response)
    def start_description(self, name):
        response = self._send_domain_message(**Description.build_start_request(name))
        return ElementTree.fromstring(response).tag == Description.success_message
    def stop_description(self, name):
        response = self._send_domain_message(**Description.build_stop_request(name))
        return ElementTree.fromstring(response).tag == Description.success_message
    @property
    def descriptions(self):
        response = self._send_domain_message(**Description.build_list_request())
        return Description.from_list_message(response)
    def shutdown(self, graceful=True):
        """Shutdown all databases in the domain.
        graceful -- (default True) means that the database will first
        be quiesced and then shutdown.
        """
        # Iterate over a snapshot: shutting a database down mutates
        # self.__databases via the event callbacks.
        for database in list(self.__databases.values()):
            database.shutdown(graceful)
    def message_received(self, root):
        """Process a management message from the broker.
        Override from session.BaseListener.
        Dispatches on the event's "Type" attribute and forwards to the
        private __peer_*/__process_* bookkeeping helpers.
        """
        if root.tag == "Event":
            event_type = root.get("Type")
            if event_type == "NewBroker":
                self.__peer_joined(Peer.from_message(self, root.find("Broker")))
            elif event_type == "BrokerExit":
                self.__peer_left(Peer.from_message(self, root.find("Broker")))
            elif event_type == "StatusChanged":
                status = root.get("Status")
                process_element = root.find("Process")
                db = self.__databases[process_element.get("Database")]
                process = Process.from_message(db, process_element)
                self.__process_status_changed(process, status)
            elif event_type == "ProcessFailed":
                # Resolve the message's peer to the instance we already track.
                peer = Peer.from_message(self, root.find("Broker"))
                peer = self.get_peer(peer.id)
                reason = root.get("Reason")
                start_id = root.get("StartId")
                self.__process_failed(peer, start_id, reason)
            elif event_type == "NewProcess" or event_type == "ProcessExit":
                process_element = root.find("Process")
                db_name = process_element.get("Database")
                # A process for an unknown database implies a new database.
                if db_name not in self.__databases:
                    self.__databases[db_name] = Database(self, db_name)
                    if self.__listener:
                        try:
                            self.__listener.database_joined(self.__databases[db_name])
                        except AttributeError:
                            pass
                if event_type == "NewProcess":
                    start_id = process_element.get("StartId")
                    self.__process_joined(Process.from_message(self.__databases[db_name],
                                                               process_element), start_id)
                else:
                    self.__process_left(Process.from_message(self.__databases[db_name],
                                                             process_element))
    def closed(self):
        """Called when the session is closed.
        Override from session.BaseListener.
        """
        if self.__listener:
            try:
                self.__listener.closed()
            except AttributeError:
                pass
    def __handle_status(self, message):
        """Handle initial domain status on domain connection.
        Note that this is ONLY for processing the initial status message. All
        further update messages are processed by message_received()."""
        root = ElementTree.fromstring(message)
        if root.tag != "Status":
            raise Exception("Expected status message; got " + root.tag)
        self.__domain_name = root.get("Domain")
        self.__entry_peer = Peer(self, self.__session.address, root.get("AgentId"),
                                 (root.get("Role") == "Broker"), self.__session.port,
                                 root.get("Hostname"), root.get("Version"))
        self.__peer_joined(self.__entry_peer)
        # First pass: register every broker so peers exist before their
        # processes are attached below.
        for child in list(root):
            if child.tag == "Broker":
                self.__peer_joined(Peer.from_message(self, child))
        for child in list(root):
            if child.tag == "Database":
                name = child.get("Name")
                if self.__listener:
                    try:
                        self.__listener.database_joined(self.__databases[name])
                    except AttributeError:
                        pass
                for process_element in list(child):
                    if process_element.tag == "Process":
                        if name not in self.__databases:
                            self.__databases[name] = Database(self, name)
                        self.__process_joined(Process.from_message(self.__databases[name], process_element), None)
    def __peer_joined(self, peer):
        """Called when a peer joins the domain."""
        self.__peers[peer.id] = peer
        self.__peers_by_addr[peer._get_normalized_addr()] = peer
        if self.__listener:
            try:
                self.__listener.peer_joined(peer)
            except AttributeError:
                pass
    def __peer_left(self, peer):
        """Called when a peer leaves the domain."""
        del self.__peers[peer.id]
        del self.__peers_by_addr[peer._get_normalized_addr()]
        if self.__listener:
            try:
                self.__listener.peer_left(peer)
            except AttributeError:
                pass
    def __process_joined(self, process, start_id):
        """Called when a process joins the domain."""
        process.database._add_process(process)
        process.peer._notify_start_id(start_id, process)
        if self.__listener:
            try:
                self.__listener.process_joined(process)
            except AttributeError:
                pass
    def __process_left(self, process):
        """Called when a process leaves the domain."""
        database = process.database
        database._remove_process(process)
        process.peer._remove_process(process)
        if self.__listener:
            try:
                self.__listener.process_left(process)
            except AttributeError:
                pass
        # The last process leaving a database removes the database itself.
        if len(database.processes) == 0:
            del self.__databases[database.name]
            if self.__listener:
                try:
                    self.__listener.database_left(database)
                except AttributeError:
                    pass
    def __process_failed(self, peer, start_id, reason):
        """Called when a process in the domain fails."""
        peer._notify_start_id(start_id, reason)
        if self.__listener:
            try:
                self.__listener.process_failed(peer, reason)
            except AttributeError:
                pass
    def __process_status_changed(self, process, status):
        """Called when a process in the domain changes status."""
        process._set_status(status)
        if self.__listener:
            try:
                self.__listener.process_status_changed(process, status)
            except AttributeError:
                pass
    def _send_management_message(self, message, peer, process):
        """Send a management message.
        Note that this is an initial verison only to support the shutdown
        routine that doesn't need to watch for return messages ... right now
        this module is only supporting the tests, which don't need the other
        management routines at this point, so we'll flesh this out (as in the
        Java implementation) in the second round when other utilites get
        updated as well
        """
        root = ElementTree.fromstring("<ManagementRequest AgentId=\"%s\" ProcessId=\"%i\"/>" % (peer.id, process.pid))
        root.append(message)
        self.__session.send(ElementTree.tostring(root))
class Peer(object):
    """Represents a peer (or host) in the domain."""
    def __init__(self, domain, address, agent_id, broker=False, port=48004, hostname=None, version=None):
        """
        :type domain Domain
        :type address str
        :type agent_id str
        :type broker bool
        :type port int
        :type hostname str
        :type version str
        """
        self.__domain = domain
        self.__address = address
        self.__id = agent_id
        self.__is_broker = broker
        self.__port = port
        self.__hostname = hostname
        # Guards __start_id_slots against races between __start_process and
        # _notify_start_id (which runs on the management/event thread).
        self.__lock = Lock()
        # pid -> Process, for processes running on this host.
        self.__processes = dict()
        self.__version = version
        # start_id -> Event while a start is pending; _notify_start_id
        # replaces the Event with the result (a Process, or an error string).
        self.__start_id_slots = dict()
        # Lazily computed "ip:port" cache; see _get_normalized_addr.
        self.__inet_sock_addr = None
    @staticmethod
    def from_message(domain, peer_element):
        """Construct a new peer object from an XML message."""
        # NOTE(review): XML attribute values arrive as strings, so Port is
        # passed as a str here even though __init__ documents it as int.
        # Existing callers only ever str() the port, so this is benign --
        # confirm before relying on peer.port being an int.
        return Peer(domain, peer_element.get("Address"), peer_element.get("AgentId"),
                    peer_element.get("Role") == "Broker", peer_element.get("Port"),
                    peer_element.get("Hostname"), peer_element.get("Version"))
    def __hash__(self):
        return hash(self.__id)
    def __eq__(self, other):
        # Peers are identified solely by agent id.
        if not other:
            return False
        return self.id == other.id
    def __ne__(self, other):
        return self.__eq__(other) != True
    def __str__(self):
        role = "broker" if self.is_broker else "agent"
        return self.connect_str + " [role=" + role + "]"
    @property
    def domain(self):
        """
        Return the domain that contains this peer.
        :rtype: Domain
        """
        return self.__domain
    @property
    def address(self):
        """
        Return the address of this peer.
        :rtype: str
        """
        return self.__address
    @property
    def connect_str(self):
        """
        Return the connect string ("address:port") for this peer.
        :rtype: str
        """
        return self.__address + ":" + str(self.__port)
    @property
    def port(self):
        """
        Return the port that this peer is using.
        :rtype: int
        """
        return self.__port
    @property
    def id(self):
        """
        Return the id of this peer (agent_id).
        :rtype: str
        """
        return self.__id
    @property
    def hostname(self):
        """
        Return the hostname of this peer.
        :rtype: str
        """
        return self.__hostname
    @property
    def version(self):
        """
        Return the NuoDB release version of this peer.
        :rtype: str
        """
        return self.__version
    @property
    def is_broker(self):
        """
        Return True if this peer is a broker.
        :rtype: bool
        """
        return self.__is_broker
    @property
    def tags(self):
        """
        Return all host tags by querying the domain (not cached locally).
        :rtype: dict[str,str]
        """
        message = self.__domain._send_domain_message("Tag", {'Action': 'GetHostTags', 'AgentId': self.id})
        tags = ElementTree.fromstring(message)
        data = {}
        for tag in tags:
            data[tag.get('Key')] = tag.get('Value')
        return data
    def get_tag(self, tag):
        """
        Return host tag value; raises KeyError if the tag is not set.
        :rtype: str
        """
        return self.tags[tag]
    def set_tag(self, key, value):
        """
        Set host tag
        :type key str
        :type value str
        """
        element = ElementTree.fromstring("<Tag Key=\"%s\" Value=\"%s\"/>" % (key, value))
        self.__domain._send_domain_message("Tag", {'Action': 'SetHostTags', 'AgentId': self.id}, children=[element])
    def delete_tag(self, key):
        """
        Delete host tag
        :type key str
        """
        element = ElementTree.fromstring("<Tag Key=\"%s\"/>" % (key))
        self.__domain._send_domain_message("Tag", {'Action': 'DeleteHostTags', 'AgentId': self.id}, children=[element])
    def start_transaction_engine(self, db_name, options=None, wait_seconds=None):
        """Start a transaction engine on this peer for a given database.
        options -- accepts a list of two element tuples, where the first element
        is a nuodb option flag and the second is the value. For options that
        do not accept a value, pass None as the value.
        If this is the first transaction engine to be started for a database
        you must include --dba-user and --dba-password in the options.
        wait_seconds -- defines how long to wait for the transaction engine to
        start. The default is None, which does not wait for a response.
        Specifying a wait_seconds value will cause this function to block
        until a response is received indicating success or failure. If the
        time elapses without a response a SessionException will be raised.
        :type db_name str
        :type options list[tuple[str]]
        :type wait_seconds int
        :rtype: Process
        """
        return self.__start_process(db_name, options, wait_seconds)
    def start_storage_manager(self, db_name, archive, initialize, options=None, wait_seconds=None):
        """Start a storage manager on this peer for a given database.
        archive -- the archive location for the new storage manager.
        initialize -- should be set to True if this storage manager is being
        started with a new archive.
        options -- accepts a list of two element tuples, where the first
        element is a nuodb option flag and the second is the value. For
        options that do not accept a value, pass None as the value.
        wait_seconds -- defines how long to wait for the storage manager to
        start. The default is None, which does not wait for a response.
        Specifying a wait_seconds value will cause this function to block
        until a response is received indicating success or failure. If the
        time elapses without a response, a SessionException will be raised.
        :type db_name str
        :type archive str
        :type initialize bool
        :type options list[tuple[str]]
        :type wait_seconds int
        :rtype: Process
        """
        # Copy caller-supplied options before appending so the caller's list
        # is never mutated.
        if not options:
            options = []
        else:
            options = list(options)
        options.append(("--archive", archive))
        if initialize:
            options.append(("--initialize", None))
        return self.__start_process(db_name, options, wait_seconds)
    def __start_process(self, db_name, options, wait_seconds):
        """
        Issue a process start and (optionally) wait for it to complete.
        wait_seconds semantics: None -> fire and forget (returns None);
        0 -> wait indefinitely; >0 -> wait that many seconds then raise.
        :type db_name str
        :type options list[tuple[str]]
        :type wait_seconds int
        :rtype: Process | None
        """
        if wait_seconds is None:
            startProcess(self.connect_str, self.__domain.user, self.__domain.password, db_name, options)
            return
        e = Event()
        # acquire the lock to avoid _notify_start_id reading the __start_id_slots map before we put the event inside it
        self.__lock.acquire()
        try:
            start_response = startProcess(self.connect_str, self.__domain.user, self.__domain.password, db_name, options)
            start_id = ElementTree.fromstring(start_response).get("StartId")
            if not start_id:
                return
            self.__start_id_slots[start_id] = e
        finally:
            self.__lock.release()
        # wait_seconds == 0 means block without a timeout.
        if wait_seconds == 0:
            e.wait()
        else:
            e.wait(wait_seconds)
            if not e.isSet():
                # Timed out: drop the slot so a late notification is ignored.
                del self.__start_id_slots[start_id]
                raise SessionException("Timed out waiting for process start")
        # _notify_start_id has replaced the Event with the actual result.
        result = self.__start_id_slots[start_id]
        del self.__start_id_slots[start_id]
        # if the process failed to start in some known way then what's in the
        # "slot" will be some meaningful error message, not a process instance
        if not isinstance(result, Process):
            raise SessionException(str(result))
        return result
    # NOTE: the "result" parameter should be an instance of Process or, in the
    # case that startup failed, anything that can be evaluated as str(result)
    # where the string is a meaningful description of the failure
    def _notify_start_id(self, start_id, result):
        # Swap the pending Event for the result, then wake the waiter.
        self.__lock.acquire()
        try:
            if start_id in self.__start_id_slots:
                e = self.__start_id_slots[start_id]
                self.__start_id_slots[start_id] = result
                e.set()
        finally:
            self.__lock.release()
    def get_local_processes(self, db_name=None):
        """Return a list of the NuoDB processes on this host.
        db_name -- (default None) if not None, only return processes on this peer that belong
        to a given database. Note that if the database spans multiple peers
        this method will only return the subset of processes that are on this
        peer.
        :rtype: list[Process]
        """
        if db_name is None:
            return self.__processes.values()
        processes = []
        for process in self.__processes.values():
            if process.database.name == db_name:
                processes.append(process)
        return processes
    def _get_process(self, pid):
        """
        Return the tracked process with this pid, or None.
        :type pid int
        :rtype: Process
        """
        return self.__processes.get(pid)
    def _add_process(self, process):
        """
        Register a process under its pid (called from Process.__init__).
        :type process Process
        """
        self.__processes[process.pid] = process
    def _remove_process(self, process):
        """
        Forget a process; best-effort, errors are printed and swallowed.
        :type process Process
        """
        try:
            del self.__processes[process.pid]
        except Exception as exception:
            # NOTE(review): exception.message is Python 2-only; on Python 3
            # this line itself raises AttributeError -- confirm the intended
            # runtime before porting.
            print(exception.message)
            pass
    def _get_normalized_addr(self):
        """
        Return ip_address:port (resolved once and cached).
        :rtype: str
        """
        if self.__inet_sock_addr is None:
            ip = socket.gethostbyname(self.__address)
            inet_sock_addr = ":".join([ip, str(self.__port)])
            self.__inet_sock_addr = inet_sock_addr
        return self.__inet_sock_addr
class Database(object):
    """Represents a NuoDB database: a named collection of TE/SM processes."""
    def __init__(self, domain, name):
        """
        :type domain Domain
        :type name str
        """
        self.__domain = domain
        self.__name = name
        # Keyed by "<peer id>:<pid>" (see __process_id) so the same pid on
        # different hosts cannot collide.
        self.__processes = dict()
        """ :type : dict[str, Process] """
    def __hash__(self):
        return hash(self.__name)
    def __eq__(self, other):
        # Databases are identified by (name, domain).
        if not other:
            return False
        return self.name == other.name and self.domain == other.domain
    def __ne__(self, other):
        return not self.__eq__(other)
    def __str__(self):
        return self.name
    @property
    def domain(self):
        """
        Return the domain that contains this database.
        :rtype: Domain
        """
        return self.__domain
    @property
    def name(self):
        """
        Return the name of this database.
        :rtype: str
        """
        return self.__name
    @property
    def description(self):
        """Return the description of this database (parsed from JSON)."""
        message = self.__domain._send_domain_message("Description", {'Action': 'GetDatabaseDescription', 'DatabaseName': self.__name})
        return json.loads(ElementTree.fromstring(message).text)
    @property
    def status(self):
        """Return the status of the database.

        Majority vote over process statuses; returns None when neither
        RUNNING nor QUIESCED has a strict majority (e.g. no processes).
        """
        #TODO: hack to determine database state
        data = {'RUNNING': 0, 'QUIESCED': 0}
        for process in self.processes:
            if process.status == "RUNNING":
                data['RUNNING'] = data['RUNNING'] + 1
            if process.status == "QUIESCED":
                data['QUIESCED'] = data['QUIESCED'] + 1
        if data['RUNNING'] > data['QUIESCED']:
            return "RUNNING"
        if data['QUIESCED'] > data['RUNNING']:
            return "QUIESCED"
    @property
    def storage_managers(self):
        """Return storage managers (non-transactional processes)."""
        return [process for process in self.__processes.values() if not process.is_transactional]
    @property
    def transaction_engines(self):
        """Return transaction engines (transactional processes)."""
        return [process for process in self.__processes.values() if process.is_transactional]
    def _add_process(self, process):
        self.__processes[self.__process_id(process)] = process
    def _remove_process(self, process):
        del self.__processes[self.__process_id(process)]
    @property
    def processes(self):
        """Return a list of all processes in this database."""
        return self.__processes.values()
    def __process_id(self, process):
        # Unique key combining the owning peer and the OS pid.
        return process.peer.id + ":" + str(process.pid)
    def __shutdown_group(self, transactional, graceful, failures):
        """Shut down (or kill) every process matching `transactional`,
        appending per-process error strings to `failures`."""
        # Snapshot the values: shutting a process down may mutate the map.
        for process in list(self.__processes.values()):
            if process.is_transactional == transactional:
                try:
                    if graceful:
                        process.shutdown()
                    else:
                        process.kill()
                except Exception as e:
                    failures.append(str(e))
    def shutdown(self, graceful=True):
        """Shutdown this database.
        graceful -- (default True) if True, the database processes will be shutdown gracefully.
        Transaction engines are shut down first, then storage managers.
        Raises SessionException if any process failed to stop.
        """
        if len(self.__processes) == 0:
            return
        failures = []
        self.__shutdown_group(True, graceful, failures)
        self.__shutdown_group(False, graceful, failures)
        if failures:
            raise SessionException("Failed to shutdown " + str(len(failures)) +
                                   " process(es)\n" + "\n".join(failures) + "\n")
    def quiesce(self, wait_seconds=0):
        """Quiesce the database.
        wait_seconds -- (default 0) defines how long to wait for the database
        to quiesce. If wait_seconds is 0 quiesce will not wait for a response.
        If wait_seconds is not 0 quiesce will block until the database is
        quiesced or wait_seconds seconds pass. If the database does not
        respond with a status of QUIESCED within the timeout, a
        SessionException will be raised.
        """
        doDatabaseAction(self.__domain.entry_peer.connect_str,
                         self.__domain.user, self.__domain.password,
                         self.__name, DatabaseAction.Quiesce)
        if wait_seconds == 0:
            return
        if not self.__wait_for_status("QUIESCED", wait_seconds):
            raise SessionException("Timed out waiting to quiesce database")
    def unquiesce(self, wait_seconds=0):
        """Unquiesce the database.
        wait_seconds -- (default 0) defines how long to wait for the database
        to unquiesce. If wait_seconds is 0 unquiesce will not wait for a response.
        If wait_seconds is not 0 unquiesce will block until the database is
        running or wait_seconds seconds pass. If the database does not
        respond with a status of RUNNING within the timeout, a
        SessionException will be raised.
        """
        doDatabaseAction(self.__domain.entry_peer.connect_str,
                         self.__domain.user, self.__domain.password,
                         self.__name, DatabaseAction.Unquiesce)
        if wait_seconds == 0:
            return
        if not self.__wait_for_status("RUNNING", wait_seconds):
            raise SessionException("Timed out waiting to unquiesce database")
    def update_configuration(self, name, value=None):
        """Set a database configuration option (value may be empty)."""
        option_element = ElementTree.fromstring("<Option Name=\"%s\">%s</Option>" %
                                                (name, value if value is not None else ""))
        doDatabaseAction(self.__domain.entry_peer.connect_str,
                         self.__domain.user, self.__domain.password,
                         self.__name, DatabaseAction.UpdateConfiguration,
                         child=option_element)
    def __wait_for_status(self, status, wait_seconds):
        """Poll once per second until every process reports `status`.
        Returns True on success, False if wait_seconds elapses first.
        """
        remaining_processes = list(self.__processes.values())
        while wait_seconds >= 0:
            # Iterate over a snapshot: the previous code removed items from
            # the list it was iterating, which skips the element following
            # each removal and could miss already-converged processes.
            for process in list(remaining_processes):
                if process.status == status:
                    remaining_processes.remove(process)
            if len(remaining_processes) == 0:
                return True
            if wait_seconds > 0:
                time.sleep(1)
            wait_seconds = wait_seconds - 1
        return False
class Process(object):
    """Represents a NuoDB process (TE or SM)"""
    def __init__(self, peer, database, port, pid, transactional, status, hostname, version, node_id):
        """
        :type peer Peer
        :type database Database
        :type port int
        :type pid int
        :type transactional bool
        :type status str
        :type hostname str
        :type version str
        :type node_id int
        """
        self.__peer = peer
        self.__database = database
        self.__port = port
        self.__pid = pid
        self.__transactional = transactional
        self.__hostname = hostname
        self.__version = version
        if node_id is not None:
            self.__node_id = int(node_id)
        else:
            self.__node_id = None
        # Register with the owning peer so peer._get_process(pid) finds us.
        peer._add_process(self)
        if status is not None:
            self.__status = status
        else:
            self.__status = "UNKNOWN"
    @staticmethod
    def from_message(database, process_element):
        """Construct a new process from an XML message."""
        peer = database.domain.get_peer(process_element.get("AgentId"))
        if peer is None:
            raise Exception("Process is for an unknown peer")
        pid = int(process_element.get("ProcessId"))
        # Reuse the already-tracked instance for this pid, if any.
        process = peer._get_process(pid)
        if process is not None:
            return process
        # NodeType == 1 marks a transaction engine.
        return Process(peer, database, int(process_element.get("Port")),
                       pid, int(process_element.get("NodeType")) == 1,
                       process_element.get("State"), process_element.get("Hostname"),
                       process_element.get("Version"), process_element.get("NodeId"))
    def __hash__(self):
        # NOTE(review): hash uses pid alone while __eq__ compares
        # (port, peer); in practice tracked processes are shared single
        # instances, so this stays consistent -- confirm before relying on
        # value-equality across distinct instances.
        return self.__pid
    def __eq__(self, other):
        if not other:
            return False
        return self.port == other.port and self.peer == other.peer
    def __ne__(self, other):
        return self.__eq__(other) != True
    def __str__(self):
        process_type = "(TE)" if self.is_transactional else "(SM)"
        return self.address + ":" + str(self.port) + " [pid=" + str(self.pid)+ "] " + process_type
    @property
    def peer(self):
        """Return the peer on which this process is running."""
        return self.__peer
    @property
    def database(self):
        """Return the database that contains this process."""
        return self.__database
    @property
    def address(self):
        """Return the address of this process (delegates to its peer)."""
        return self.__peer.address
    @property
    def port(self):
        """Return the port that this process is using."""
        return self.__port
    @property
    def pid(self):
        """Return the process id of this process."""
        return self.__pid
    @property
    def node_id(self):
        """Return the NodeId of this process."""
        return self.__node_id
    @property
    def is_transactional(self):
        """Return True if this process is a Transaction Engine.
        Return False if it is a Storage Manager.
        """
        return self.__transactional
    @property
    def hostname(self):
        """Return the hostname of this process."""
        return self.__hostname
    @property
    def version(self):
        """Return the NuoDB release version of this process."""
        return self.__version
    def shutdown(self, wait_time=0):
        """Shutdown this process.
        This is used in a graceful=True database shutdown.
        Sends an Admin/Shutdown management message; does not wait for a reply.
        """
        msg = ElementTree.fromstring("<Request Service=\"Admin\" Type=\"Shutdown\" WaitTime=\"%i\"/>" % wait_time)
        self.__peer.domain._send_management_message(msg, self.__peer, self)
    def kill(self):
        """Kill this process.
        This is used in a graceful=False database shutdown.
        """
        domain = self.__peer.domain
        killProcess(self.__peer.connect_str, domain.user, domain.password, self.pid)
    @property
    def status(self):
        """Return the status of this process.
        Possible statuses are:
        ACTIVE - The process has reported that it's ready for database participation.
        RUNNING - The process is in its running/active state.
        SYNCING - The process is currently synchronizing with the database state.
        QUIESCING - The process is starting to quiesce.
        UNQUIESCING - The process is moving from being quiesced to running.
        QUIESCED - The process is quiesced and will not service transactions.
        DIED - The process is recognized as having left the database.
        QUIESCING2 - An internal state change in the process of quiescing.
        SHUTTING_DOWN - The process is in the process of a soft shutdown.
        UNKNOWN - Any unknown state ... this should always be last in this enum
        to protect against skew between this enum and the C++ constants.
        """
        return self.__status
    def wait_for_status(self, status, wait_seconds):
        """Block until this process has a specified status.
        If the status is not reached within wait_seconds seconds this method
        will return False. If the status is reached it will immediately return
        True.
        """
        # Polls once per second; wait_seconds == 0 performs a single check.
        while wait_seconds >= 0:
            if self.status == status:
                return True
            if wait_seconds > 0:
                time.sleep(1)
            wait_seconds = wait_seconds - 1
        return False
    def _set_status(self, status):
        # Called by the domain's event handlers when a status change arrives.
        self.__status = status
    # to start, this is just a simple routine that asks for the db password and
    # uses that to establish the same direct connection that we've been using
    # to this point ... eventually we will support the async request/response
    # to send this over the existing connection, but for RC1 that's one too
    # many moving pieces to implement and test
    def query(self, query_type, msg_body=None):
        """Query the engine directly, authenticating with the database
        credentials fetched from the peer's Manager service."""
        session = Session(self.peer.connect_str, service="Manager")
        session.authorize(self.peer.domain.user, self.peer.domain.password)
        pwd_response = session.doRequest(attributes={"Type": "GetDatabaseCredentials",
                                                     "Database": self.database.name})
        pwd_xml = ElementTree.fromstring(pwd_response)
        pwd = pwd_xml.find("Password").text.strip()
        return queryEngine(self.address, self.port, query_type, pwd, msg_body)
class Template(object):
    """Builds and parses XML admin requests for database templates.

    The build_* static methods return request dicts consumable by the
    domain's message-sending layer; the from_* static methods parse the
    corresponding XML responses.
    """
    success_message = "Success"
    @staticmethod
    def _build_children(summary, requirements):
        """Build the [Summary, Requirements] child elements shared by the
        create and update requests (was duplicated in both builders)."""
        summary_element = ElementTree.Element("Summary")
        summary_element.text = summary
        requirements_element = ElementTree.Element("Requirements")
        requirements_element.text = requirements
        return [summary_element, requirements_element]
    @staticmethod
    def build_create_request(name, summary, requirements):
        """Return a request dict that creates the template `name`."""
        return {"service": "Description",
                "attributes": {'Action': 'CreateTemplate', 'TemplateName': name},
                "children": Template._build_children(summary, requirements)}
    @staticmethod
    def build_update_request(name, summary, requirements):
        """Return a request dict that updates the template `name`."""
        return {"service": "Description",
                "attributes": {'Action': 'UpdateTemplate', 'TemplateName': name},
                "children": Template._build_children(summary, requirements)}
    @staticmethod
    def build_delete_request(name):
        """Return a request dict that deletes the template `name`."""
        return {"service": "Description", "attributes": {'Action': 'DeleteTemplate', 'TemplateName': name}}
    @staticmethod
    def build_get_request(name):
        """Return a request dict that fetches the template `name`."""
        return {"service": "Description", "attributes": {'Action': 'GetTemplate', 'TemplateName': name}}
    @staticmethod
    def build_list_request():
        """Return a request dict that lists all template names."""
        return {"service": "Description", "attributes": {'Action': 'ListTemplates'}}
    @staticmethod
    def from_message(message):
        """Parse a GetTemplate XML response into a Template instance.
        Missing Summary/Requirements children default to ""."""
        root = ElementTree.fromstring(message)
        name = root.get("TemplateName")
        summary_element = root.find("Summary")
        summary = summary_element.text if summary_element is not None else ""
        requirements_element = root.find("Requirements")
        requirements = requirements_element.text if requirements_element is not None else ""
        return Template(name, summary, requirements)
    @staticmethod
    def from_list_message(message):
        """Extract the template names from a ListTemplates XML response."""
        root = ElementTree.fromstring(message)
        return [child.get("TemplateName") for child in root]
    def __init__(self, name, summary, requirements):
        """
        :type name str
        :type summary str
        :type requirements str
        """
        self._name = name
        self._summary = summary
        self._requirements = requirements
    @property
    def name(self):
        return self._name
    @property
    def summary(self):
        return self._summary
    @property
    def requirements(self):
        return self._requirements
class Description(object):
    """Builds and parses XML admin requests for database descriptions.

    The build_* static methods return request dicts consumable by the
    domain's message-sending layer; the from_* static methods parse the
    corresponding XML responses.
    """
    success_message = "Success"
    @staticmethod
    def _build_children(template_name, variables):
        """Build the [Template, Variables] child elements shared by the
        create and update requests (was duplicated in both builders)."""
        template_element = ElementTree.Element("Template")
        template_element.text = template_name
        variables_element = ElementTree.Element("Variables")
        for key in variables:
            variable_child = ElementTree.SubElement(variables_element, "Variable")
            variable_child.set("Key", key)
            variable_child.text = variables[key]
        return [template_element, variables_element]
    @staticmethod
    def _child_text(root, tag, default=""):
        """Return the text of the first `tag` child of `root`, or `default`
        when the child is absent."""
        element = root.find(tag)
        return element.text if element is not None else default
    @staticmethod
    def build_create_request(name, template_name, variables, dba_user, dba_password):
        """Return a request dict that creates a description for database `name`."""
        return {"service": "Description",
                "attributes": {'Action': 'CreateDescription',
                               'DatabaseName': name,
                               'DbaUser': dba_user,
                               'DbaPassword': dba_password},
                "children": Description._build_children(template_name, variables)}
    @staticmethod
    def build_update_request(name, template_name, variables):
        """Return a request dict that updates the description of database `name`."""
        return {"service": "Description",
                "attributes": {'Action': 'UpdateDescription',
                               'DatabaseName': name},
                "children": Description._build_children(template_name, variables)}
    @staticmethod
    def build_delete_request(name):
        """Return a request dict that deletes the description of database `name`."""
        return {"service": "Description", "attributes": {'Action': 'DeleteDescription', 'DatabaseName': name}}
    @staticmethod
    def build_get_request(name):
        """Return a request dict that fetches the description of database `name`."""
        return {"service": "Description", "attributes": {'Action': 'GetDescription', 'DatabaseName': name}}
    @staticmethod
    def build_list_request():
        """Return a request dict that lists all described databases."""
        return {"service": "Description", "attributes": {'Action': 'ListDescriptions'}}
    @staticmethod
    def build_start_request(name):
        """Return a request dict that starts the described database `name`."""
        return {"service": "Description", "attributes": {'Action': 'StartDescription', 'DatabaseName': name}}
    @staticmethod
    def build_stop_request(name):
        """Return a request dict that stops the described database `name`."""
        return {"service": "Description", "attributes": {'Action': 'StopDescription', 'DatabaseName': name}}
    @staticmethod
    def from_message(message):
        """Parse a GetDescription XML response into a Description instance.
        Missing Template/Status/LiveStatus children default to ""."""
        root = ElementTree.fromstring(message)
        name = root.get("DatabaseName")
        template_name = Description._child_text(root, "Template")
        variables = {}
        variables_element = root.find("Variables")
        if variables_element is not None:
            for var in variables_element:
                variables[var.get("Key")] = var.text
        status = Description._child_text(root, "Status")
        live_status = Description._child_text(root, "LiveStatus")
        return Description(name, template_name, variables, status, live_status)
    @staticmethod
    def from_list_message(message):
        """Extract database names from a ListDescriptions XML response."""
        root = ElementTree.fromstring(message)
        return [child.get("DatabaseName") for child in root]
    def __init__(self, name, template_name, variables, status, live_status=""):
        """
        :type name str
        :type template_name str
        :type variables dict[str,str]
        :type status str
        :type live_status str
        """
        self._name = name
        self._template_name = template_name
        self._variables = variables
        self._status = status
        self._live_status = live_status
    @property
    def name(self):
        return self._name
    @property
    def template_name(self):
        return self._template_name
    @property
    def variables(self):
        return self._variables
    @property
    def status(self):
        return self._status
    @property
    def live_status(self):
        return self._live_status
| {
"repo_name": "tvincentNuoDB/nuodb-python",
"path": "pynuodb/entity.py",
"copies": "1",
"size": "47044",
"license": "bsd-3-clause",
"hash": -1453739839825792000,
"line_mean": 33.7701404287,
"line_max": 136,
"alpha_frac": 0.5814981719,
"autogenerated": false,
"ratio": 4.560294687863513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5641792859763513,
"avg_score": null,
"num_lines": null
} |
__all__ = ["DomTracker"]
from devtools_event_listener import DevToolsEventListener
from status import *
import json
# Tracks the state of the DOM and execution context creation.
# Tracks the state of the DOM and execution context creation.
class DomTracker(DevToolsEventListener):
    """Maintains a DOM node id -> frame id map fed by DOM.* DevTools events,
    so frame elements can be resolved to their frame ids."""
    def __init__(self, client):
        DevToolsEventListener.__init__(self)
        self.node_to_frame_map = {}
        client.AddListener(self)
    # return status and frame_id<string>
    def GetFrameIdForNode(self, node_id):
        """Return (Status, frame_id); kNoSuchFrame when the node is unknown
        or is not a frame element."""
        frame_id = self.node_to_frame_map.get(node_id)
        if frame_id is None:
            return (Status(kNoSuchFrame, "element is not frame"), "")
        return (Status(kOk), frame_id)
    # Overridden from DevToolsEventListener:
    def OnEvent(self, client, method, params):
        """Consume DOM.* events, keeping node_to_frame_map up to date."""
        if method == "DOM.setChildNodes":
            nodes = params.get("nodes")
            if nodes is None:
                return Status(kUnknownError, "DOM.setChildNodes missing 'nodes'")
            if not self._ProcessNodeList(nodes):
                js = json.dumps(nodes)
                return Status(kUnknownError, "DOM.setChildNodes has invalid 'nodes': " + js)
        elif method == "DOM.childNodeInserted":
            node = params.get("node")
            if node is None:
                return Status(kUnknownError, "DOM.childNodeInserted missing 'node'")
            if not self._ProcessNode(node):
                js = json.dumps(node)
                return Status(kUnknownError, "DOM.childNodeInserted has invalid 'node': " + js)
        elif method == "DOM.documentUpdated":
            # Document replaced: drop stale mappings and re-request the root
            # document so Inspector pushes fresh node info.
            self.node_to_frame_map.clear()
            client.SendCommand("DOM.getDocument", {})
        return Status(kOk)
    def OnConnected(self, client):
        """Reset state and request the root document on (re)connect."""
        self.node_to_frame_map.clear()
        # Fetch the root document node so that Inspector will push DOM node
        # information to the client.
        return client.SendCommand("DOM.getDocument", {})
    def _ProcessNodeList(self, nodes_list=None):
        """Record every node in nodes_list; False if the payload is malformed.
        (Default was a mutable [] literal; use a None sentinel instead.)"""
        if nodes_list is None:
            nodes_list = []
        if type(nodes_list) is not list:
            return False
        # all() short-circuits on the first malformed node, like the old loop.
        return all(self._ProcessNode(node) for node in nodes_list)
    def _ProcessNode(self, node):
        """Record one node dict (and recursively its children); False if the
        payload is malformed. Exact type checks mirror JSON-decoded shapes."""
        if type(node) is not dict:
            return False
        node_id = node.get("nodeId")
        if type(node_id) is not int:
            return False
        frame_id = node.get("frameId")
        if type(frame_id) is str:
            self.node_to_frame_map[node_id] = frame_id
        children = node.get("children")
        if children:
            return self._ProcessNodeList(children)
        return True
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/dom_tracker.py",
"copies": "1",
"size": "2427",
"license": "bsd-3-clause",
"hash": 4967637890160209000,
"line_mean": 31.36,
"line_max": 87,
"alpha_frac": 0.6538936959,
"autogenerated": false,
"ratio": 3.638680659670165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4792574355570165,
"avg_score": null,
"num_lines": null
} |
__all__ = ['DotifyDict']
class DotifyDict(dict):
    """A dict whose items are reachable both as attributes (d.a) and via
    dot-separated key paths (d['a.b.c']), nesting DotifyDicts as needed.

    NOTE(review): this class uses Python 2-only syntax below
    (`raise KeyError, msg` and `iteritems`); it cannot run on Python 3
    as written.
    """
    def __init__(self, data=None):
        # Seed from an optional plain dict, wrapping nested dicts so the
        # dotted-path machinery works all the way down.
        data = data or {}
        for k, v in data.items():
            k = str(k)
            if isinstance(v, dict):
                setattr(self, k, DotifyDict(v))
            else:
                setattr(self, k, v)
    def __repr__(self):
        return super(DotifyDict, self).__repr__()
    def __setitem__(self, key, value):
        # Dotted keys recurse one level at a time, auto-creating intermediate
        # DotifyDicts via set_default.
        if '.' in key:
            myKey, restOfKey = key.split('.', 1)
            target = self.set_default(myKey, DotifyDict())
            if not isinstance(target, DotifyDict):
                raise KeyError, 'cannot set "{0}" in "{1}" ({2})'.format(restOfKey, myKey, repr(target))
            target[restOfKey] = value
        else:
            # Plain dict values are wrapped so later dotted access works.
            if isinstance(value, dict) and not isinstance(value, DotifyDict):
                value = DotifyDict(value)
            super(DotifyDict, self).__setitem__(key, value)
    def __getitem__(self, key):
        # NOTE(review): a missing leaf returns None instead of raising, so
        # get()'s default only applies when an *intermediate* key is missing
        # (that KeyError is not caught here) -- confirm callers expect this.
        if '.' not in key:
            try:
                return super(DotifyDict, self).__getitem__(key)
            except KeyError:
                return None
        myKey, restOfKey = key.split('.', 1)
        target = super(DotifyDict, self).__getitem__(myKey)
        if not isinstance(target, DotifyDict):
            raise KeyError, 'cannot get "{0}" in "{1}" ({2})'.format(restOfKey, myKey, repr(target))
        return target[restOfKey]
    def __contains__(self, key):
        # Dotted membership walks the nested DotifyDicts; a non-dict
        # intermediate means the path cannot exist.
        if '.' not in key:
            return super(DotifyDict, self).__contains__(key)
        myKey, restOfKey = key.split('.', 1)
        target = super(DotifyDict, self).__getitem__(myKey)
        if not isinstance(target, DotifyDict):
            return False
        return restOfKey in target
    def get(self, key, default=None):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default
    def update(self, other):
        # Deep-merge: nested DotifyDicts merge recursively, lists extend,
        # sets union, everything else overwrites. AttributeError (e.g. the
        # key does not exist yet on self) falls back to plain assignment.
        for k, v in other.iteritems():
            try:
                if isinstance(v, DotifyDict):
                    d = getattr(self, k)
                    self[k] = d.update(v)
                elif isinstance(v, list):
                    self[k].extend(other[k])
                elif isinstance(v, set):
                    self[k].update(other[k])
                else:
                    self[k] = other[k]
            except AttributeError:
                self[k] = other[k]
        return self
    def set_default(self, key, default):
        # Like dict.setdefault, but routed through __setitem__ so dotted
        # keys and dict-wrapping apply.
        if key not in self:
            self[key] = default
        return self[key]
    __setattr__ = __setitem__
    __getattr__ = __getitem__
| {
"repo_name": "rocktavious/pyul",
"path": "pyul/coreUtils/dotifydict.py",
"copies": "1",
"size": "2631",
"license": "mit",
"hash": 3317074368692226600,
"line_mean": 32.7307692308,
"line_max": 104,
"alpha_frac": 0.5013302927,
"autogenerated": false,
"ratio": 4.079069767441861,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5080400060141861,
"avg_score": null,
"num_lines": null
} |
__all__ = ['dot', 'subset_assignment', 'subset_slice_assignment', 'vector_subset_slice_assignment', 'matrix_addition']
import os
import numpy
import atexit
import math
import pycuda.autoinit
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray
import scikits.cuda.cublas as cublas
import scikits.cuda.linalg as linalg
from pycuda.compiler import SourceModule
from .utils import gpu_func
from .enums import MAX_BLOCK_SIZE, CUR_DIR, CACHE_DIR
# One cuBLAS context is created at import time and shared by every routine
# in this module.
handle = cublas.cublasCreate()
def destroy_cublas():
    """Tear down the module-wide cuBLAS handle (registered with atexit)."""
    cublas.cublasDestroy(handle)
atexit.register(destroy_cublas)
# Compile the CUDA kernels once at import; PyCUDA caches the compiled
# artifacts in CACHE_DIR to avoid recompiling on every run.
mod = SourceModule(open(os.path.join(CUR_DIR, 'kernel/matrix.cu')).read(), cache_dir=CACHE_DIR)
subset_assignment_kernel = mod.get_function('subset_assignment_kernel')
subset_slice_assignment_kernel = mod.get_function('subset_slice_assignment_kernel')
vector_subset_slice_assignment_kernel = mod.get_function('vector_subset_slice_assignment_kernel')
# TODO: generalize
def subset_assignment(d_a, d_b, a_x):
    """Assign d_b into the subset of d_a selected by index a_x on the GPU
    (exact element mapping is defined by kernel/matrix.cu).

    Launch geometry: one thread per element of d_b, capped at
    MAX_BLOCK_SIZE threads per block.
    """
    count = d_b.size
    threads = min(count, MAX_BLOCK_SIZE)
    blocks = max(1, int(math.ceil(count / float(threads))))
    subset_assignment_kernel(
        d_a, d_b, numpy.int32(a_x), numpy.int32(count),
        block=(threads, 1, 1), grid=(blocks, 1, 1))
def subset_slice_assignment(d_a, d_b, a_x_slice):
    """Assign d_b into the a_x_slice[0]:a_x_slice[1] row range of d_a on the
    GPU (exact element mapping is defined by kernel/matrix.cu).

    The slice length must equal d_b's leading dimension. Launch geometry is
    one thread per element of d_b, capped at MAX_BLOCK_SIZE threads/block.
    """
    count = d_b.size
    threads = min(count, MAX_BLOCK_SIZE)
    blocks = max(1, int(math.ceil(count / float(threads))))
    start, stop = a_x_slice[0], a_x_slice[1]
    assert (stop - start) == d_b.shape[0]
    row_elems = numpy.prod(d_b.shape[1:])
    subset_slice_assignment_kernel(
        d_a, d_b, numpy.int32(start), numpy.int32(count), numpy.int32(row_elems),
        block=(threads, 1, 1), grid=(blocks, 1, 1))
def vector_subset_slice_assignment(d_a, d_b, a_slice):
    """Assign the 1-D vector d_b into d_a over the a_slice[0]:a_slice[1]
    range on the GPU (exact mapping is defined by kernel/matrix.cu).

    Both arrays must be rank 1 and the slice length must equal len(d_b).
    """
    count = d_b.size
    threads = min(count, MAX_BLOCK_SIZE)
    blocks = max(1, int(math.ceil(count / float(threads))))
    assert len(d_a.shape) == 1
    assert len(d_b.shape) == 1
    lo, hi = a_slice[0], a_slice[1]
    assert (hi - lo) == d_b.shape[0]
    vector_subset_slice_assignment_kernel(
        d_a, d_b, numpy.int32(lo), numpy.int32(count),
        block=(threads, 1, 1), grid=(blocks, 1, 1))
@gpu_func
def dot(d_a, d_b, transa='N', transb='N', out=None):
    """Matrix multiply via cuBLAS (scikits.cuda.linalg.dot).

    transa/transb -- 'N' or 'T', selecting optional transposes of the
    operands. When `out` is omitted a float32 result array of the
    appropriate shape is allocated on the device.
    """
    if out is None:
        rows = d_a.shape[1] if transa == 'T' else d_a.shape[0]
        cols = d_b.shape[0] if transb == 'T' else d_b.shape[1]
        out = gpuarray.empty((rows, cols), numpy.float32)
    return linalg.dot(d_a, d_b, transa=transa, transb=transb, handle=handle, out=out)
@gpu_func
def matrix_addition(d_a, d_b):
    """In-place elementwise addition: d_a += d_b; returns d_a.

    Dispatches on rank: SAXPY for vectors, SGEAM for matrices, and a
    generic ravel-add-copy fallback for higher-rank arrays.
    """
    # Overwrites d_a
    assert d_a.shape == d_b.shape
    if len(d_a.shape) == 1:
        # Vector addition: d_a = 1.0 * d_b + d_a with unit strides.
        cublas.cublasSaxpy(handle, d_a.size, 1.0, d_b.gpudata, 1, d_a.gpudata, 1)
    elif len(d_a.shape) == 2:
        # Matrix addition: SGEAM computes C = alpha*A + beta*B in place.
        # NOTE(review): cuBLAS is column-major while gpuarrays are C-ordered;
        # with lda = ldb = ldc = m the layout mismatch cancels out for a pure
        # elementwise add on contiguous arrays -- confirm for non-contiguous
        # inputs.
        m, n = d_a.shape
        cublas.cublasSgeam(handle,
                'N', 'N',
                m, n,
                1.0,
                d_a.gpudata, m,
                1.0,
                d_b.gpudata, m,
                d_a.gpudata, m)
    else:
        # Fallback: compute flattened sum into a temporary, then copy the
        # result back into d_a's device buffer.
        tmp = (d_a.ravel() + d_b.ravel()).reshape(d_a.shape)
        cuda.memcpy_dtod(d_a.gpudata, tmp.gpudata, d_a.nbytes)
    return d_a
| {
"repo_name": "Captricity/sciguppy",
"path": "sciguppy/matrix.py",
"copies": "1",
"size": "3278",
"license": "mit",
"hash": -5218300648358515000,
"line_mean": 36.25,
"line_max": 118,
"alpha_frac": 0.6143990238,
"autogenerated": false,
"ratio": 2.7826825127334467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38970815365334466,
"avg_score": null,
"num_lines": null
} |
__all__ = ["DottedList", "SingleFloat", "Char", "Vector", "Cons",
"Pointer", "RRef", "Index",
"Package", "Symbol", "package", "intern", "keyword"]
import pyconspack
import pyconspack.error as E
class CustomType:
    """Base for pyconspack wrapper types: provides a uniform repr of the
    form pyconspack.<ClassName>(<base><extra>), where subclasses override
    base_repr / extra_repr to fill in the parenthesized payload."""
    def __repr__(self):
        inner = self.base_repr() + self.extra_repr()
        return "pyconspack.{0}({1})".format(self.__class__.__name__, inner)
    def base_repr(self):
        return ""
    def extra_repr(self):
        return ""
class CustomBuiltinType(CustomType):
    # Mixes CustomType's repr scheme with a builtin base class: base_repr
    # delegates to the builtin's own __repr__ (the *second* entry in
    # __bases__, so subclasses must list CustomBuiltinType first and the
    # builtin second, e.g. `class Vector(CustomBuiltinType, list)`).
    def base_repr(self):
        base = self.__class__.__bases__[1]
        return base.__repr__(self)
class DottedList(CustomBuiltinType, list): pass
class SingleFloat(CustomBuiltinType, float): pass
class Char(CustomBuiltinType, str): pass
class Vector(CustomBuiltinType, list): pass
class Pointer(CustomBuiltinType, int): pass
class Index(CustomBuiltinType, int): pass
class Cons(CustomBuiltinType, tuple): pass
class RRef(CustomType):
def __init__(self, val):
self.value = val
def extra_repr(self):
return self.value.__repr__()
class Package(CustomType):
    """A Lisp-style package: a named table of interned symbols.

    Every package created is recorded in the class-level ``packages``
    registry, keyed by its (usually upper-cased) name.
    """
    # Global registry of all known packages, keyed by name.
    packages = dict()
    def find(name, keepcase=False):
        """Look up a package by name; call as ``Package.find(name)``.

        A falsy *name* or an existing Package instance is returned
        unchanged; an unknown name returns None.
        """
        if(not name):
            return name
        elif(type(name) is Package):
            return name
        elif(not keepcase):
            name = name.upper()
        if(name in Package.packages):
            return Package.packages[name]
    def __init__(self, name, keepcase=False):
        """Create and register a new package; raises E.PackageExists on clash."""
        if(not keepcase):
            name = name.upper()
        if(name in Package.packages):
            raise E.PackageExists("Package called {n} already exists".format(n=name))
        self.name = name
        self.symbols = dict()
        Package.packages[name] = self
    def intern(self, symbol):
        """Intern *symbol* (a Symbol or a string) in this package and return it."""
        if(type(symbol) is str):
            symbol = Symbol(symbol)
        name = symbol.name
        if(name in self.symbols and symbol.package == self):
            return self.symbols[name]
        self.symbols[name] = symbol
        symbol.package = self
        return symbol
    def find_symbol(self, name):
        """Return the symbol named *name*, or None when not interned here."""
        if(name in self.symbols):
            return self.symbols[name]
    def unintern(self, symbol):
        """Remove *symbol* from this package's symbol table (no-op if absent)."""
        # Bug fix: the original did `del self.symbols[name]` with `name`
        # undefined in this scope (NameError); delete by the symbol's name.
        if(symbol.name in self.symbols):
            del self.symbols[symbol.name]
    def extra_repr(self):
        return self.name.__repr__()
    def __str__(self):
        return '<pyconspack.Package ' + self.name + '>'
def package(name, keepcase=False):
    """Return the package named *name*, creating it on first use.

    A falsy *name* is passed through unchanged.
    """
    if not name:
        return name
    existing = Package.find(name)
    if existing:
        return existing
    return Package(name, keepcase)
def intern(name, pkg=None, keepcase=False):
    """Return the symbol *name*, interning it in *pkg* when one is given.

    Raises E.BadValue when *name* is not a string. Without *pkg*, an
    uninterned symbol is created.
    """
    if not isinstance(name, str):
        raise E.BadValue('{n} is not a string'.format(n=name))
    if not keepcase:
        name = name.upper()
    if not pkg:
        return Symbol(name)
    existing = package(pkg).find_symbol(name)
    if existing:
        return existing
    return Symbol(name, package(pkg))
def keyword(name, keepcase=False):
    """Return the keyword symbol :NAME, interning it on first use."""
    kw_package = pyconspack.types.package('KEYWORD')
    if not keepcase:
        name = name.upper()
    found = kw_package.find_symbol(name)
    if found is not None:
        return found
    return pyconspack.types.intern(name, 'KEYWORD', keepcase)
class Symbol(CustomType):
    """A (possibly uninterned) symbol with a name and an owning package.

    ``package is None`` marks an uninterned symbol, printed as ``#:NAME``.
    """
    # None is a valid package, this represents uninterned symbols
    def __init__(self, name, pkg=None, keepcase=False):
        self.name = name if keepcase else name.upper()
        self.package = None
        pkg = package(pkg)
        if pkg:
            # Interning sets self.package as a side effect.
            pkg.intern(self)
    def __str__(self):
        if self.package is package('keyword'):
            return ':' + self.name
        if not self.package:
            return '#:' + self.name
        return self.package.name + '::' + self.name
    def extra_repr(self):
        rendered = self.name.__repr__()
        if self.package is None:
            return rendered + ", None"
        return rendered + ", '" + self.package.name + "'"
    def is_keyword(self):
        return self.package == pyconspack.types.package('KEYWORD')
| {
"repo_name": "conspack/pyconspack",
"path": "types.py",
"copies": "1",
"size": "3931",
"license": "bsd-2-clause",
"hash": -7834120745731181000,
"line_mean": 27.2805755396,
"line_max": 105,
"alpha_frac": 0.5843296871,
"autogenerated": false,
"ratio": 3.865290068829892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49496197559298916,
"avg_score": null,
"num_lines": null
} |
__all__ = ['dotted', 'set', 'get', 'flatten', 'unflatten']
from simplegeneric import generic
##
# High-level API.
_sentinel = object()
def dotted(o):
    """
    Return an object that supports dotted key access.

    Raises TypeError when no wrapper is registered for o's type.
    """
    wrapped = wrap(o)
    # wrap()'s default behaviour is to return its argument unchanged; if that
    # happened and no adapter is registered for the unwrapped type, the
    # object is unsupported.
    fell_through = wrapped is o
    if fell_through and not wrap.has_type(type(unwrap(o))):
        raise TypeError(type(o))
    return wrapped
def set(o, key, value, container_factory=None):
    """
    Set the item with the given dotted key to the given value.

    Missing intermediate containers are created through *container_factory*
    when one is supplied.
    """
    # Unwrap in case it's already a dotted.
    target = unwrap(o)
    parent, leaf = _parent_and_key(target, str(key),
                                   container_factory=container_factory)
    setitem(parent, leaf, value)
def get(o, key, default=_sentinel):
    """
    Get the item with the given dotted key, wrapped for dotted access.
    """
    # Unwrap in case it's already a dotted.
    target = unwrap(o)
    return wrap(_get(target, str(key), default))
def flatten(o):
    """
    Flatten an object graph into a sequence of (key, value) pairs where key is
    a nested key with segments separated by a '.'.
    Note: flattening an object graph is a lossy process - there is no way to
    reverse the process reliably without help. Dotted key segments are strings
    and there is no way to know if a '0' segment represents a key in a dict or
    an index in a list.
    """
    # Iterative depth-first walk: the stack holds (items-iterator, parent-key)
    # pairs; after descending into a nested container, the parent's iterator
    # is resumed from the top of the stack.
    # NOTE(review): the top level calls .iteritems() but nested levels call
    # .items() — confirm registered wrappers provide both methods.
    stack = [(wrap(o).iteritems(), None)]
    while stack:
        items_iter, parent_key = stack[-1]
        for (key, value) in items_iter:
            if parent_key is None:
                full_key = key
            else:
                full_key = '.'.join([parent_key, key])
            # A wrapped value differs from its unwrapped form only for
            # containers; descend into those instead of yielding them.
            if value is not unwrap(value):
                stack.append((iter(value.items()), full_key))
                break
            yield full_key, value
        else:
            # Iterator exhausted without descending: done with this level.
            stack.pop()
def unflatten(l, container_factory=None):
    """
    Expand a flattened sequence of (dotted key, value) pairs into a graph
    of dictionaries.

    Note: this does not reliably reverse flatten() when lists were present —
    a segment like '0' cannot distinguish a list index from a dict key, so by
    default every container becomes a dict.
    """
    if container_factory is None:
        def container_factory(parent_key, child_key):
            return {}
    result = {}
    for key, value in l:
        set(result, key, value, container_factory=container_factory)
    return result
##
# Extension API.
@generic
def setitem(o, key, value):
    # Extension point: register per-type item assignment (simplegeneric
    # `@setitem.when_type(...)`). No default behaviour.
    raise NotImplementedError()
@generic
def getitem(o, key):
    # Extension point: register per-type item lookup. No default behaviour.
    raise NotImplementedError()
@generic
def wrap(o):
    # Extension point: return a dotted-access wrapper for o's type.
    # Default: return the object unchanged; dotted() detects this fallback.
    return o
@generic
def unwrap(o):
    # Extension point: return the plain object behind a wrapper.
    # Default: the object is already plain.
    return o
##
# Internal implemenation.
def _parent_and_key(o, key, container_factory):
    """Resolve the dotted *key* in *o*, returning (parent container, leaf key).

    Missing intermediate containers are created via *container_factory*
    (called with the dotted path of the missing container and the key of the
    child about to be stored in it); with no factory, the KeyError propagates.
    """
    key = key.split('.')
    parent_key, item_key = key[:-1], key[-1]
    for i in range(len(parent_key)):
        # Path of the container we are about to enter, as a list of segments.
        container_key = parent_key[:(i+1)]
        try:
            o = getitem(o, container_key[-1])
        except KeyError:
            if container_factory is None:
                raise
            # The child key handed to the factory is the final segment when
            # this is the last missing container, otherwise the next segment.
            if len(container_key) == len(parent_key):
                container = container_factory('.'.join(container_key), item_key)
            else:
                container = container_factory('.'.join(container_key), parent_key[i+1])
            setitem(o, container_key[-1], container)
            o = container
    return o, item_key
def _get(o, key, default=_sentinel):
    """Walk *key*'s dot-separated segments through *o*.

    Returns *default* (when given) if any segment is missing; otherwise the
    KeyError propagates.
    """
    segments = key.split('.')
    leaf = segments[-1]
    try:
        node = o
        for segment in segments[:-1]:
            node = getitem(node, segment)
        return getitem(node, leaf)
    except KeyError:
        if default is _sentinel:
            raise
        return default
| {
"repo_name": "ish/dottedish",
"path": "dottedish/api.py",
"copies": "1",
"size": "3740",
"license": "bsd-3-clause",
"hash": -4543697612574823000,
"line_mean": 26.5,
"line_max": 87,
"alpha_frac": 0.5938502674,
"autogenerated": false,
"ratio": 3.7512537612838517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9785096904644865,
"avg_score": 0.012001424807797355,
"num_lines": 136
} |
__all__ = ["Downloader"]
from .scraper import Scraper
from .serial import write_story, write_tar, write_zip
import re
# Destination-name patterns used to pick the output container format.
ZIP_FILE_REGEX = re.compile(r".*\.zip", re.IGNORECASE)
# Matches ".tar" with an optional compression suffix (group 1, e.g. "gz").
TAR_FILE_REGEX = re.compile(r".*\.tar(?:\.([a-z]+)|)", re.IGNORECASE)
class Downloader(object):
    """Downloads a CHYOA story and serializes it to an archive or directory."""
    def __init__(self, recursive=True):
        # recursive=True downloads the whole story tree, not just one chapter.
        self.recursive = recursive
    def download(self, url, dest, debug=False):
        """Scrape *url* and write the story to *dest*.

        The output format is chosen from the destination name: .zip,
        .tar (optionally compressed, e.g. .tar.gz), or a plain path.
        """
        if not Scraper.is_chyoa_url(url):
            print("warning: This does not look like a CHYOA url. They are usually")
            print("warning: in the form of \"https://chyoa.com/story/NAME.ID\"")
        if debug: print("Scraper().scrape(%s)" % url)
        scraper = Scraper()
        scraper.scrape(url)
        story = scraper.story
        if debug: print("story: %s" % story)
        if ZIP_FILE_REGEX.fullmatch(dest):
            if debug: print("%s: zip file" % dest)
            write_zip(story, dest, is_story=self.recursive)
        else:
            if debug: print("%s: tar file" % dest)
            match = TAR_FILE_REGEX.fullmatch(dest)
            if match:
                # Group 1 is the optional compression suffix (e.g. "gz").
                compression = match.group(1)
                if compression:
                    if debug: print("%s: tar.%s file" % (dest, compression))
                    write_tar(story, dest, compression, is_story=self.recursive)
                else:
                    write_tar(story, dest, is_story=self.recursive)
            elif self.recursive:
                if debug: print("%s: no compression" % dest)
                write_story(story, dest)
            else:
                if debug: print("%s: no compression (chapter)" % dest)
                # NOTE(review): write_chapter is never imported from .serial
                # at the top of this file — this branch raises NameError as
                # written; confirm the intended function name.
                write_chapter(story, dest)
| {
"repo_name": "sexypants/chyoa-scraper",
"path": "chyoa/download.py",
"copies": "1",
"size": "1675",
"license": "mit",
"hash": 811498152455377000,
"line_mean": 37.0681818182,
"line_max": 83,
"alpha_frac": 0.5540298507,
"autogenerated": false,
"ratio": 3.69757174392936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.475160159462936,
"avg_score": null,
"num_lines": null
} |
__all__ = ('DownloadState',)
import pickle
import time
from datetime import timedelta
from .utils import say, norm_path
class DownloadState:
    """
    Semi-persistent record of episodes that have already been downloaded.

    Re-downloading is assumed to be cheap (most torrent clients recognize
    finished files), so a bad or missing state file only produces a warning
    and an empty set. The on-disk format is a pickled set of episodes;
    prune() keeps the file from growing without bound.
    """
    default_prune_age = timedelta(weeks=24)
    def __init__(self, state_file, read_only=False):
        self.state_file = norm_path(state_file)
        self.state = set()
        self.read_only = read_only
    def __enter__(self):
        # Failure to load is non-fatal by design; start from an empty set.
        if not self.load():
            say("Warning: Bad or missing state file - Using empty list")
        return self
    def __exit__(self, type, value, traceback):
        # Persist on exit unless opened read-only.
        if not self.read_only:
            self.save()
    def prune(self, older_than=default_prune_age):
        """Drop and return episodes created before the cutoff age."""
        cutoff = int(time.time() - older_than.total_seconds())
        stale = {episode for episode in self.state if episode.created < cutoff}
        self.state -= stale
        return stale
    def load(self):
        """Load the pickled set from disk; returns True on success."""
        try:
            with open(self.state_file, 'rb') as handle:
                loaded = pickle.load(handle)
        except (OSError, EOFError, pickle.PickleError):
            return False
        if not isinstance(loaded, set):
            return False
        self.state = loaded
        return True
    def save(self):
        """Pickle the current set to the state file."""
        with open(self.state_file, 'wb') as handle:
            pickle.dump(self.state, handle)
    def add(self, e):
        return self.state.add(e)
    def __contains__(self, e):
        return e in self.state
    def dump(self):
        """Return the recorded episodes as a sorted list."""
        return sorted(self.state)
"repo_name": "nepthar/autopirate",
"path": "autopirate/download_state.py",
"copies": "1",
"size": "1853",
"license": "unlicense",
"hash": -6095886308005093000,
"line_mean": 26.25,
"line_max": 79,
"alpha_frac": 0.6637884512,
"autogenerated": false,
"ratio": 3.7971311475409837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9893880834108286,
"avg_score": 0.013407752926539562,
"num_lines": 68
} |
__all__ = ('Driver', 'DriverNotFoundError')
class Driver(object):
    """Describes a database backend and the connection form it needs.

    Concrete drivers subclass this, override the class attributes, and are
    added to a class-level registry through Driver.register().
    """
    name = None
    display = 'Generic Driver'
    # Connection form description: field name -> {label, required, default}.
    form = {
        "host": {"label": "Host", "required": False, "default": None},
        "port": {"label": "Port", "required": False, "default": None},
        "username": {"label": "Username", "required": False, "default": None},
        "password": {"label": "Password", "required": False, "default": None},
        "database": {"label": "Database", "required": False, "default": None},
        "query": {"label": "Query", "required": False, "default": None},
    }
    # Form fields that must stay empty for this backend.
    empty_fields = frozenset()
    use_database_as_filename = False
    # Registry of driver classes, keyed by driver name (class-private).
    __drivers = {}
    def __repr__(self):
        return '<Driver %s>' % (self.name,)
    @classmethod
    def get(cls, driver_name):
        """Look up a driver; a 'name+variant' string matches on 'name'."""
        base_name = driver_name.split("+", 1)[0]
        return cls.__drivers[base_name]
    @classmethod
    def get_all(cls):
        """Return all registered driver classes."""
        return cls.__drivers.values()
    @classmethod
    def register(cls, driver_cls):
        """Add *driver_cls* to the registry under its ``name``."""
        cls.__drivers[driver_cls.name] = driver_cls
# Concrete backends: each subclass overrides `form` with the fields its
# connection dialog needs, plus `empty_fields`/flags where appropriate.
class SQLiteDriver(Driver):
    # File-based: only a filename is needed; network fields must stay empty.
    name = 'sqlite'
    display = 'SQLite'
    form = {"database": {"label": "Filename", "required": False, "default": None}}
    empty_fields = frozenset(("host", "port", "username", "password"))
    use_database_as_filename = True
class MySQLDriver(Driver):
    name = 'mysql'
    display = 'MySQL'
    form = {"host": {"label": "Host", "required": True, "default": ""},
            "port": {"label": "Port", "required": False, "default": 3306},
            "username": {"label": "Username", "required": False, "default": None},
            "password": {"label": "Password", "required": False, "default": None},
            "database": {"label": "Database", "required": True, "default": ""}
            }
class PostgreSQLDriver(Driver):
    name = 'postgresql'
    display = 'PostgreSQL'
    form = {"host": {"label": "Host", "required": True, "default": ""},
            "port": {"label": "Port", "required": False, "default": 5432},
            "username": {"label": "Username", "required": False, "default": None},
            "password": {"label": "Password", "required": False, "default": None},
            "database": {"label": "Database", "required": True, "default": ""}
            }
class FirebirdDriver(Driver):
    # Firebird addresses the database by file path, like SQLite.
    name = 'firebird'
    display = 'Firebird'
    form = {"host": {"label": "Host", "required": False, "default": ""},
            "port": {"label": "Port", "required": False, "default": 3050},
            "username": {"label": "User", "required": False, "default": None},
            "password": {"label": "Password", "required": False, "default": None},
            "database": {"label": "Filename", "required": True, "default": ""}
            }
    use_database_as_filename = True
class MsSQLDriver(Driver):
    name = 'mssql'
    display = 'Microsoft SQL Server'
    form = {"host": {"label": "Host", "required": True, "default": ""},
            "port": {"label": "Port", "required": False, "default": 1433},
            "username": {"label": "Username", "required": False, "default": None},
            "password": {"label": "Password", "required": False, "default": None},
            # NOTE(review): database is required but defaults to None while
            # the other drivers default to "" — confirm this is intended.
            "database": {"label": "Database", "required": True, "default": None}
            }
# Populate the registry with all built-in drivers at import time.
Driver.register(SQLiteDriver)
Driver.register(MySQLDriver)
Driver.register(PostgreSQLDriver)
Driver.register(FirebirdDriver)
Driver.register(MsSQLDriver)
class DriverNotFoundError(ValueError):
    """Raised when no Driver is registered under a requested name."""
    pass
| {
"repo_name": "coinbox/coinbox-core",
"path": "cbpos/database/driver.py",
"copies": "1",
"size": "3506",
"license": "mit",
"hash": 8668789083604716000,
"line_mean": 39.2988505747,
"line_max": 82,
"alpha_frac": 0.5610382202,
"autogenerated": false,
"ratio": 3.8316939890710384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48927322092710385,
"avg_score": null,
"num_lines": null
} |
__all__ = ['dstack']
from ..core import numeric as _nx
from ..core import atleast_3d
def dstack(tup):
    """
    Stack arrays in sequence depth wise (along third axis).
    Takes a sequence of arrays and stack them along the third axis
    to make a single array. Rebuilds arrays divided by `dsplit`.
    This is a simple way to stack 2D arrays (images) into a single
    3D array for processing.
    Parameters
    ----------
    tup : sequence of arrays
        Arrays to stack. All of them must have the same shape along all
        but the third axis.
    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.
    See Also
    --------
    vstack : Stack along first axis.
    hstack : Stack along second axis.
    concatenate : Join arrays.
    dsplit : Split array along third axis.
    Notes
    -----
    Equivalent to ``np.concatenate(tup, axis=2)``.
    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.dstack((a,b))
    array([[[1, 2],
            [2, 3],
            [3, 4]]])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.dstack((a,b))
    array([[[1, 2]],
           [[2, 3]],
           [[3, 4]]])
    """
    # Use a list comprehension instead of map(): on Python 3, map() returns
    # a lazy iterator, which concatenate cannot treat as a sequence.
    return _nx.concatenate([atleast_3d(a) for a in tup], 2)
| {
"repo_name": "bussiere/pypyjs",
"path": "website/demo/home/rfk/repos/pypy/lib_pypy/numpypy/lib/shape_base.py",
"copies": "2",
"size": "1306",
"license": "mit",
"hash": 2117283222832045600,
"line_mean": 23.1851851852,
"line_max": 71,
"alpha_frac": 0.5421133231,
"autogenerated": false,
"ratio": 3.6997167138810196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018300653594771241,
"num_lines": 54
} |
__all__ = ['dump']
def dump(mesh, f, e_color=None):
    '''
    Serialize *mesh* as collada; *f* may be a path or an open file.
    '''
    from baiji.serialization.util.openlib import ensure_file_open_and_call
    return ensure_file_open_and_call(f, _dump, 'w', mesh, e_color=e_color)
def _dump(f, mesh, e_color=None):
    '''
    Writes a mesh, in collada file format, to the open file *f*.
    '''
    document = mesh_to_collada(mesh, e_color=e_color)
    document.write(f.name)
def dumps(mesh, e_color=None):
    '''
    Generates a UTF-8 XML string containing the mesh, in collada format.
    '''
    from lxml import etree
    document = mesh_to_collada(mesh, e_color=e_color)
    # save() refreshes the underlying xmlnode before serializing.
    document.save()
    return etree.tostring(document.xmlnode, encoding='UTF-8')
def mesh_to_collada(mesh, e_color=None):
    '''
    Build a pycollada document from a mesh.

    Supports per-vertex color, but nothing else. Edges (``mesh.e``) may be
    colored per group via *e_color*: a sequence of dicts with keys
    ``"e_indices"`` (indices into ``mesh.e``) and ``"color"`` (RGB tuple).
    Edges not covered by any group are drawn with the default line material.
    '''
    import numpy as np
    try:
        from collada import Collada, scene
    except ImportError:
        raise ImportError("lace.serialization.dae.mesh_to_collade requires package pycollada.")

    def create_material(dae, name, color=(1, 1, 1)):
        # Register a double-sided lambert effect/material pair on the
        # document and return a MaterialNode primitives can reference.
        from collada import material, scene
        effect = material.Effect("{}_effect".format(name), [], "lambert", diffuse=color, specular=(0, 0, 0), double_sided=True)
        mat = material.Material("{}_material".format(name), name, effect)
        dae.effects.append(effect)
        dae.materials.append(mat)
        return scene.MaterialNode(name, mat, inputs=[])

    def geometry_from_mesh(dae, mesh):
        from collada import source, geometry
        srcs = []
        # v: vertex positions
        srcs.append(source.FloatSource("verts-array", mesh.v, ('X', 'Y', 'Z')))
        input_list = source.InputList()
        input_list.addInput(0, 'VERTEX', "#verts-array")
        # vc: optional per-vertex color, expanded to per-corner values
        if mesh.vc is not None:
            input_list.addInput(len(srcs), 'COLOR', "#color-array")
            srcs.append(source.FloatSource("color-array", mesh.vc[mesh.f.ravel()], ('X', 'Y', 'Z')))
        # f: triangles; indices are repeated once per registered source
        geom = geometry.Geometry(str(mesh), "geometry0", "mymesh", srcs)
        indices = np.dstack([mesh.f for _ in srcs]).ravel()
        triset = geom.createTriangleSet(indices, input_list, "tri_material")
        geom.primitives.append(triset)
        extra_materials = []
        # e: optional edges, either uniformly or per-group colored
        if mesh.e is not None:
            if e_color is None:
                indices = np.dstack([mesh.e for _ in srcs]).ravel()
                lineset = geom.createLineSet(indices, input_list, "line_material")
                geom.primitives.append(lineset)
            else:
                # Bug fix: np.bool was a deprecated alias removed from NumPy;
                # the builtin bool is equivalent here.
                edges_rendered = np.zeros(len(mesh.e), dtype=bool)
                for i, this_e_color in enumerate(e_color):
                    these_edge_indices = this_e_color["e_indices"]
                    this_color = this_e_color["color"]
                    material_name = "line_material_{}".format(i)
                    indices = np.dstack(
                        [mesh.e[these_edge_indices] for _ in srcs]
                    ).ravel()
                    extra_materials.append(
                        create_material(dae, name=material_name, color=this_color)
                    )
                    lineset = geom.createLineSet(indices, input_list, material_name)
                    geom.primitives.append(lineset)
                    edges_rendered[these_edge_indices] = True
                edges_remaining = (~edges_rendered).nonzero()
                # Bug fix: nonzero() returns a tuple of index arrays, so
                # len() of it is always 1; test the index array itself so a
                # default-material line set is only emitted when uncovered
                # edges actually exist.
                if len(edges_remaining[0]):
                    indices = np.dstack([mesh.e[edges_remaining] for _ in srcs]).ravel()
                    lineset = geom.createLineSet(indices, input_list, "line_material")
                    geom.primitives.append(lineset)
        dae.geometries.append(geom)
        return geom, extra_materials

    dae = Collada()
    geom, extra_materials = geometry_from_mesh(dae, mesh)
    node = scene.Node(
        "node0",
        children=[
            scene.GeometryNode(
                geom,
                [
                    create_material(dae, name="tri_material"),
                    create_material(dae, name="line_material", color=(1, 0, 0)),
                ]
                + extra_materials,
            )
        ],
    )
    myscene = scene.Scene("myscene", [node])
    dae.scenes.append(myscene)
    dae.scene = myscene
    return dae
| {
"repo_name": "bodylabs/lace",
"path": "lace/serialization/dae.py",
"copies": "1",
"size": "4242",
"license": "bsd-2-clause",
"hash": -3918767842820193300,
"line_mean": 36.875,
"line_max": 127,
"alpha_frac": 0.5598774163,
"autogenerated": false,
"ratio": 3.7177914110429446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47776688273429446,
"avg_score": null,
"num_lines": null
} |
__all__=('dumpttf',)
def dumpttf(fn,fontName=None, verbose=0):
    '''Dump every glyph known to a TTF file into a PDF grid.

    fn : path to the .ttf file (IOError when missing)
    fontName : registration name; defaults to the file's basename
    verbose : when truthy, print a one-line summary

    The output file is named "<fontName>-ttf-dump.pdf"; each cell shows the
    glyph's hex code point next to the glyph itself.
    '''
    import os
    if not os.path.isfile(fn):
        raise IOError('No such file "%s"' % fn)
    from reportlab.pdfbase.pdfmetrics import registerFont, stringWidth
    from reportlab.pdfbase.ttfonts import TTFont
    from reportlab.pdfgen.canvas import Canvas
    if fontName is None:
        fontName = os.path.splitext(os.path.basename(fn))[0]
    dmpfn = '%s-ttf-dump.pdf' % fontName
    ttf = TTFont(fontName, fn)
    K = list(ttf.face.charToGlyph.keys())  # all mapped code points
    registerFont(ttf)
    c = Canvas(dmpfn)
    W,H = c._pagesize
    titleFontSize = 30 # title font size
    titleFontName = 'Helvetica'
    labelFontName = 'Courier'
    fontSize = 10
    border = 36
    # Cell geometry: label width plus a gap for the glyph itself.
    dx0 = stringWidth('12345: ', fontName, fontSize)
    dx = dx0+20
    dy = 20
    K.sort()
    y = 0
    page = 0
    for i, k in enumerate(K):
        # Start a new page (with title) when the current column runs out.
        if y<border:
            if page: c.showPage()
            page += 1
            y = H - border - titleFontSize
            c.setFont(titleFontName, titleFontSize)
            c.drawCentredString(W/2.0,y, 'TrueType Font %s Page %d' %(fontName,page))
            y -= 0.2*titleFontSize + dy
            x = border
        c.setFont(labelFontName, 10)
        c.drawString(x,y,'%5.5x:' % k )
        c.setFont(fontName, 10)
        # NOTE(review): passing UTF-8 bytes to drawString — confirm this is
        # what the installed reportlab version expects (str is typical).
        c.drawString(x+dx0,y,chr(k).encode('utf8'))
        x += dx
        # Wrap to the next row when the line is full.
        if x+dx>W-border:
            x = border
            y -= dy
    c.showPage()
    c.save()
    if verbose:
        print('Font %s("%s") has %d glyphs\ndumped to "%s"' % (fontName,fn,len(K),dmpfn))
if __name__=='__main__':
    import sys, glob
    # The --verbose flag enables the per-font summary line.
    if '--verbose' in sys.argv:
        sys.argv.remove('--verbose')
        verbose = 1
    else:
        verbose = 0
    # Every remaining argument is treated as a glob pattern of ttf files.
    for a in sys.argv[1:]:
        for fn in glob.glob(a):
            dumpttf(fn, verbose=verbose)
| {
"repo_name": "Distrotech/reportlab",
"path": "tools/utils/dumpttf.py",
"copies": "14",
"size": "1891",
"license": "bsd-3-clause",
"hash": 2510270623860276000,
"line_mean": 30.5166666667,
"line_max": 89,
"alpha_frac": 0.5637228979,
"autogenerated": false,
"ratio": 3.2435677530017153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__=('dumpttf',)
def dumpttf(fn,fontName=None, verbose=0):
    '''Dump every glyph known to a TTF file into a PDF grid (Python 2 only:
    uses unichr and the print statement).

    fn : path to the .ttf file (IOError when missing)
    fontName : registration name; defaults to the file's basename
    verbose : when truthy, print a one-line summary
    '''
    import os
    if not os.path.isfile(fn):
        raise IOError('No such file "%s"' % fn)
    from reportlab.pdfbase.pdfmetrics import registerFont, stringWidth
    from reportlab.pdfbase.ttfonts import TTFont
    from reportlab.pdfgen.canvas import Canvas
    if fontName is None:
        fontName = os.path.splitext(os.path.basename(fn))[0]
    dmpfn = '%s-ttf-dump.pdf' % fontName
    ttf = TTFont(fontName, fn)
    K = ttf.face.charToGlyph.keys()  # all mapped code points
    registerFont(ttf)
    c = Canvas(dmpfn)
    W,H = c._pagesize
    titleFontSize = 30 # title font size
    titleFontName = 'Helvetica'
    labelFontName = 'Courier'
    fontSize = 10
    border = 36
    # Cell geometry: label width plus a gap for the glyph itself.
    dx0 = stringWidth('12345: ', fontName, fontSize)
    dx = dx0+20
    dy = 20
    K.sort()
    y = 0
    page = 0
    for i, k in enumerate(K):
        # Start a new page (with title) when the current column runs out.
        if y<border:
            if page: c.showPage()
            page += 1
            y = H - border - titleFontSize
            c.setFont(titleFontName, titleFontSize)
            c.drawCentredString(W/2.0,y, 'TrueType Font %s Page %d' %(fontName,page))
            y -= 0.2*titleFontSize + dy
            x = border
        c.setFont(labelFontName, 10)
        c.drawString(x,y,'%5.5x:' % k )
        c.setFont(fontName, 10)
        c.drawString(x+dx0,y,unichr(k).encode('utf8'))
        x += dx
        # Wrap to the next row when the line is full.
        if x+dx>W-border:
            x = border
            y -= dy
    c.showPage()
    c.save()
    if verbose:
        print 'Font %s("%s") has %d glyphs\ndumped to "%s"' % (fontName,fn,len(K),dmpfn)
if __name__=='__main__':
    import sys, glob
    # The --verbose flag enables the per-font summary line.
    if '--verbose' in sys.argv:
        sys.argv.remove('--verbose')
        verbose = 1
    else:
        verbose = 0
    # Every remaining argument is treated as a glob pattern of ttf files.
    for a in sys.argv[1:]:
        for fn in glob.glob(a):
            dumpttf(fn, verbose=verbose)
| {
"repo_name": "nickpack/reportlab",
"path": "tools/utils/dumpttf.py",
"copies": "1",
"size": "1947",
"license": "bsd-3-clause",
"hash": 5471645766530230000,
"line_mean": 30.45,
"line_max": 88,
"alpha_frac": 0.5469953775,
"autogenerated": false,
"ratio": 3.333904109589041,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4380899487089041,
"avg_score": null,
"num_lines": null
} |
''' All DUT alignment functions in space and time are listed here plus additional alignment check functions'''
from __future__ import division
import logging
import re
import os
import progressbar
import warnings
from collections import Iterable
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
import tables as tb
import numpy as np
from scipy.optimize import curve_fit, minimize_scalar, leastsq, basinhopping, OptimizeWarning, minimize
from matplotlib.backends.backend_pdf import PdfPages
from testbeam_analysis.tools import analysis_utils
from testbeam_analysis.tools import plot_utils
from testbeam_analysis.tools import geometry_utils
from testbeam_analysis.tools import data_selection
# Imports for track based alignment
from testbeam_analysis.track_analysis import fit_tracks
from testbeam_analysis.result_analysis import calculate_residuals
warnings.simplefilter("ignore", OptimizeWarning) # Fit errors are handled internally, turn of warnings
def correlate_cluster(input_cluster_files, output_correlation_file, n_pixels, pixel_size=None, dut_names=None, plot=True, chunk_size=4999999):
    '''Calculates the correlation histograms from the cluster arrays.
    The 2D correlation array of pairs of two different devices are created on event basis.
    All permutations are considered (all clusters of the first device are correlated with all clusters of the second device).
    Parameters
    ----------
    input_cluster_files : iterable
        Iterable of filenames of the cluster files.
    output_correlation_file : string
        Filename of the output correlation file with the correlation histograms.
    n_pixels : iterable of tuples
        One tuple per DUT describing the total number of pixels (column/row),
        e.g. for two FE-I4 DUTs [(80, 336), (80, 336)].
    pixel_size : iterable of tuples
        One tuple per DUT describing the pixel dimension (column/row),
        e.g. for two FE-I4 DUTs [(250, 50), (250, 50)].
        If None, assuming same pixel size for all DUTs.
    dut_names : iterable of strings
        Names of the DUTs. If None, the DUT index will be used.
    plot : bool
        If True, create additional output plots.
    chunk_size : uint
        Chunk size of the data when reading from file.
    '''
    logging.info('=== Correlating the index of %d DUTs ===', len(input_cluster_files))
    with tb.open_file(output_correlation_file, mode="w") as out_file_h5:
        n_duts = len(input_cluster_files)
        # One column/row histogram per non-reference DUT, correlating it
        # against DUT0.
        column_correlations = []
        row_correlations = []
        for dut_index in range(1, n_duts):
            shape_column = (n_pixels[dut_index][0], n_pixels[0][0])
            shape_row = (n_pixels[dut_index][1], n_pixels[0][1])
            column_correlations.append(np.zeros(shape_column, dtype=np.int32))
            row_correlations.append(np.zeros(shape_row, dtype=np.int32))
        start_indices = [None] * n_duts  # Store the loop indices for speed up
        with tb.open_file(input_cluster_files[0], mode='r') as in_file_h5:  # Open DUT0 cluster file
            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80)
            progress_bar.start()
            pool = Pool()  # Provide worker pool
            for cluster_dut_0, start_indices[0] in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, start_index=start_indices[0], chunk_size=chunk_size):  # Loop over the cluster of DUT0 in chunks
                actual_event_numbers = cluster_dut_0[:]['event_number']
                # Create correlation histograms to the reference device for
                # all other devices; do this in parallel to save time.
                dut_results = []
                for dut_index, cluster_file in enumerate(input_cluster_files[1:], start=1):  # Loop over the other cluster files
                    dut_results.append(pool.apply_async(_correlate_cluster, kwds={'cluster_dut_0': cluster_dut_0,
                                                                                  'cluster_file': cluster_file,
                                                                                  'start_index': start_indices[dut_index],
                                                                                  'start_event_number': actual_event_numbers[0],
                                                                                  'stop_event_number': actual_event_numbers[-1] + 1,
                                                                                  'column_correlation': column_correlations[dut_index - 1],
                                                                                  'row_correlation': row_correlations[dut_index - 1],
                                                                                  'chunk_size': chunk_size}))
                # Collect results when available
                for dut_index, dut_result in enumerate(dut_results, start=1):
                    (start_indices[dut_index], column_correlations[dut_index - 1], row_correlations[dut_index - 1]) = dut_result.get()
                progress_bar.update(start_indices[0])
            pool.close()
            pool.join()
            # Store the correlation histograms
            for dut_index in range(n_duts - 1):
                out_col = out_file_h5.create_carray(out_file_h5.root, name='CorrelationColumn_%d_0' % (dut_index + 1), title='Column Correlation between DUT%d and DUT%d' % (dut_index + 1, 0), atom=tb.Atom.from_dtype(column_correlations[dut_index].dtype), shape=column_correlations[dut_index].shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_row = out_file_h5.create_carray(out_file_h5.root, name='CorrelationRow_%d_0' % (dut_index + 1), title='Row Correlation between DUT%d and DUT%d' % (dut_index + 1, 0), atom=tb.Atom.from_dtype(row_correlations[dut_index].dtype), shape=row_correlations[dut_index].shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                # Bug fix: the histogram at list index dut_index belongs to
                # DUT dut_index + 1 (see the carray names above), so record
                # that DUT's cluster file — the original stored
                # input_cluster_files[dut_index], pairing DUT0's file with
                # itself for the first histogram.
                out_col.attrs.filenames = [str(input_cluster_files[0]), str(input_cluster_files[dut_index + 1])]
                out_row.attrs.filenames = [str(input_cluster_files[0]), str(input_cluster_files[dut_index + 1])]
                out_col[:] = column_correlations[dut_index]
                out_row[:] = row_correlations[dut_index]
            progress_bar.finish()
    if plot:
        plot_utils.plot_correlations(input_correlation_file=output_correlation_file, pixel_size=pixel_size, dut_names=dut_names)
def merge_cluster_data(input_cluster_files, output_merged_file, n_pixels, pixel_size, chunk_size=4999999):
    '''Takes the cluster from all cluster files and merges them into one big table aligned at a common event number.

    Empty entries are signaled with column = row = charge = nan. Position is translated from indices to um. The
    local coordinate system origin (0, 0) is defined in the sensor center, to decouple translation and rotation.
    Cluster position errors are calculated from cluster dimensions.

    Parameters
    ----------
    input_cluster_files : list of pytables files
        File name of the input cluster files with correlation data.
    output_merged_file : pytables file
        File name of the output tracklet file.
    n_pixels : iterable of tuples
        One tuple per DUT describing the total number of pixels (column/row),
        e.g. for two FE-I4 DUTs [(80, 336), (80, 336)].
    pixel_size : iterable of tuples
        One tuple per DUT describing the pixel dimension (column/row),
        e.g. for two FE-I4 DUTs [(250, 50), (250, 50)].
    chunk_size : uint
        Chunk size of the data when reading from file.
    '''
    logging.info('=== Merge cluster files from %d DUTs to merged hit file ===', len(input_cluster_files))
    # Create result array description, depends on the number of DUTs.
    # Column order: event_number, x/y/z, charge, n_hits per DUT, track info, then x/y/z errors per DUT.
    description = [('event_number', np.int64)]
    for index, _ in enumerate(input_cluster_files):
        description.append(('x_dut_%d' % index, np.float))
    for index, _ in enumerate(input_cluster_files):
        description.append(('y_dut_%d' % index, np.float))
    for index, _ in enumerate(input_cluster_files):
        description.append(('z_dut_%d' % index, np.float))
    for index, _ in enumerate(input_cluster_files):
        description.append(('charge_dut_%d' % index, np.float))
    for index, _ in enumerate(input_cluster_files):
        description.append(('n_hits_dut_%d' % index, np.int8))
    description.extend([('track_quality', np.uint32), ('n_tracks', np.int8)])
    for index, _ in enumerate(input_cluster_files):
        description.append(('xerr_dut_%d' % index, np.float))
    for index, _ in enumerate(input_cluster_files):
        description.append(('yerr_dut_%d' % index, np.float))
    for index, _ in enumerate(input_cluster_files):
        description.append(('zerr_dut_%d' % index, np.float))
    start_indices_merging_loop = [None] * len(input_cluster_files)  # Store the merging loop indices for speed up
    start_indices_data_loop = [None] * len(input_cluster_files)  # Additional store indices for the data loop
    actual_start_event_number = None  # Defines the first event number of the actual chunk for speed up. Cannot be deduced from DUT0, since this DUT could have missing event numbers.
    # Merge the cluster data from different DUTs into one table
    with tb.open_file(output_merged_file, mode='w') as out_file_h5:
        merged_cluster_table = out_file_h5.create_table(out_file_h5.root, name='MergedCluster', description=np.zeros((1,), dtype=description).dtype, title='Merged cluster on event number', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        with tb.open_file(input_cluster_files[0], mode='r') as in_file_h5:  # Open DUT0 cluster file
            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80)
            progress_bar.start()
            # DUT0 drives the chunking; each chunk defines the event-number window for all other DUTs
            for actual_cluster_dut_0, start_indices_data_loop[0] in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, start_index=start_indices_data_loop[0], start_event_number=actual_start_event_number, stop_event_number=None, chunk_size=chunk_size):  # Loop over the cluster of DUT0 in chunks
                actual_event_numbers = actual_cluster_dut_0[:]['event_number']
                # First loop: calculate the minimum event number indices needed to merge all cluster from all files to this event number index
                common_event_numbers = actual_event_numbers
                for dut_index, cluster_file in enumerate(input_cluster_files[1:], start=1):  # Loop over the other cluster files
                    with tb.open_file(cluster_file, mode='r') as actual_in_file_h5:  # Open other DUT cluster file
                        for actual_cluster, start_indices_merging_loop[dut_index] in analysis_utils.data_aligned_at_events(actual_in_file_h5.root.Cluster, start_index=start_indices_merging_loop[dut_index], start_event_number=actual_start_event_number, stop_event_number=actual_event_numbers[-1] + 1, chunk_size=chunk_size, fail_on_missing_events=False):  # Loop over the cluster in the actual cluster file in chunks
                            common_event_numbers = analysis_utils.get_max_events_in_both_arrays(common_event_numbers, actual_cluster[:]['event_number'])
                merged_cluster_array = np.zeros(shape=(common_event_numbers.shape[0],), dtype=description)  # resulting array to be filled
                for index, _ in enumerate(input_cluster_files):
                    # for no hit: column = row = charge = nan
                    merged_cluster_array['x_dut_%d' % (index)] = np.nan
                    merged_cluster_array['y_dut_%d' % (index)] = np.nan
                    merged_cluster_array['z_dut_%d' % (index)] = np.nan
                    merged_cluster_array['charge_dut_%d' % (index)] = np.nan
                    merged_cluster_array['xerr_dut_%d' % (index)] = np.nan
                    merged_cluster_array['yerr_dut_%d' % (index)] = np.nan
                    merged_cluster_array['zerr_dut_%d' % (index)] = np.nan
                # Set the event number
                merged_cluster_array['event_number'] = common_event_numbers[:]
                # Fill result array with DUT 0 data
                actual_cluster_dut_0 = analysis_utils.map_cluster(common_event_numbers, actual_cluster_dut_0)
                # Select real hits, values with nan are virtual hits
                selection = ~np.isnan(actual_cluster_dut_0['mean_column'])
                # Convert indices to positions, origin defined in the center of the sensor
                merged_cluster_array['x_dut_0'][selection] = pixel_size[0][0] * (actual_cluster_dut_0['mean_column'][selection] - 0.5 - (0.5 * n_pixels[0][0]))
                merged_cluster_array['y_dut_0'][selection] = pixel_size[0][1] * (actual_cluster_dut_0['mean_row'][selection] - 0.5 - (0.5 * n_pixels[0][1]))
                merged_cluster_array['z_dut_0'][selection] = 0.0
                # Position errors: scale cluster-size based index errors to um; z error stays 0
                xerr = np.zeros(selection.shape)
                yerr = np.zeros(selection.shape)
                zerr = np.zeros(selection.shape)
                xerr[selection] = actual_cluster_dut_0['err_column'][selection] * pixel_size[0][0]
                yerr[selection] = actual_cluster_dut_0['err_row'][selection] * pixel_size[0][1]
                merged_cluster_array['xerr_dut_0'][selection] = xerr[selection]
                merged_cluster_array['yerr_dut_0'][selection] = yerr[selection]
                merged_cluster_array['zerr_dut_0'][selection] = zerr[selection]
                merged_cluster_array['charge_dut_0'][selection] = actual_cluster_dut_0['charge'][selection]
                merged_cluster_array['n_hits_dut_0'][selection] = actual_cluster_dut_0['n_hits'][selection]
                # Fill result array with other DUT data
                # Second loop: get the cluster from all files and merge them to the common event number
                for dut_index, cluster_file in enumerate(input_cluster_files[1:], start=1):  # Loop over the other cluster files
                    with tb.open_file(cluster_file, mode='r') as actual_in_file_h5:  # Open other DUT cluster file
                        for actual_cluster_dut, start_indices_data_loop[dut_index] in analysis_utils.data_aligned_at_events(actual_in_file_h5.root.Cluster, start_index=start_indices_data_loop[dut_index], start_event_number=common_event_numbers[0], stop_event_number=common_event_numbers[-1] + 1, chunk_size=chunk_size, fail_on_missing_events=False):  # Loop over the cluster in the actual cluster file in chunks
                            actual_cluster_dut = analysis_utils.map_cluster(common_event_numbers, actual_cluster_dut)
                            # Select real hits, values with nan are virtual hits
                            selection = ~np.isnan(actual_cluster_dut['mean_column'])
                            # Convert indices to positions, origin in the center of the sensor, remaining DUTs
                            merged_cluster_array['x_dut_%d' % (dut_index)][selection] = pixel_size[dut_index][0] * (actual_cluster_dut['mean_column'][selection] - 0.5 - (0.5 * n_pixels[dut_index][0]))
                            merged_cluster_array['y_dut_%d' % (dut_index)][selection] = pixel_size[dut_index][1] * (actual_cluster_dut['mean_row'][selection] - 0.5 - (0.5 * n_pixels[dut_index][1]))
                            merged_cluster_array['z_dut_%d' % (dut_index)][selection] = 0.0
                            xerr = np.zeros(selection.shape)
                            yerr = np.zeros(selection.shape)
                            zerr = np.zeros(selection.shape)
                            xerr[selection] = actual_cluster_dut['err_column'][selection] * pixel_size[dut_index][0]
                            yerr[selection] = actual_cluster_dut['err_row'][selection] * pixel_size[dut_index][1]
                            merged_cluster_array['xerr_dut_%d' % (dut_index)][selection] = xerr[selection]
                            merged_cluster_array['yerr_dut_%d' % (dut_index)][selection] = yerr[selection]
                            merged_cluster_array['zerr_dut_%d' % (dut_index)][selection] = zerr[selection]
                            merged_cluster_array['charge_dut_%d' % (dut_index)][selection] = actual_cluster_dut['charge'][selection]
                            merged_cluster_array['n_hits_dut_%d' % (dut_index)][selection] = actual_cluster_dut['n_hits'][selection]
                merged_cluster_table.append(merged_cluster_array)
                actual_start_event_number = common_event_numbers[-1] + 1  # Set the starting event number for the next chunked read
                progress_bar.update(start_indices_data_loop[0])
            progress_bar.finish()
def prealignment(input_correlation_file, output_alignment_file, z_positions, pixel_size, s_n=0.1, fit_background=False, reduce_background=False, dut_names=None, no_fit=False, non_interactive=True, iterations=3, plot=True, gui=False, queue=False):
    '''Deduce a pre-alignment from the correlations, by fitting the correlations with a straight line (gives offset, slope, but no tilt angles).

    The user can define cuts on the fit error and straight line offset in an interactive way.

    Parameters
    ----------
    input_correlation_file : string
        Filename of the input correlation file.
    output_alignment_file : string
        Filename of the output alignment file.
    z_positions : iterable
        The z positions of the DUTs in um.
    pixel_size : iterable of tuples
        One tuple per DUT describing the pixel dimension (column/row),
        e.g. for two FE-I4 DUTs [(250, 50), (250, 50)].
    s_n : float
        The signal to noise ratio for peak signal over background peak. This should be specified when the background is fitted with a gaussian function.
        Usually data with a lot of tracks per event have a gaussian background. A good S/N value can be estimated by investigating the correlation plot.
        The default value is usually fine.
    fit_background : bool
        Data with a lot of tracks per event have a gaussian background from the beam profile. Also try to fit this background to determine the correlation
        peak correctly. If you see a clear 2D gaussian in the correlation plot this should be activated. If you have 1-2 tracks per event and large pixels
        this option should be off, because otherwise overfitting is possible.
    reduce_background : bool
        Reduce background (uncorrelated events) by using SVD of the 2D correlation array.
    dut_names : iterable
        Names of the DUTs. If None, the DUT index will be used.
    no_fit : bool
        Use Hough transformation to calculate slope and offset.
    non_interactive : bool
        Deactivate user interaction and estimate fit range automatically.
    iterations : uint
        The number of iterations in non-interactive mode.
    plot : bool
        If True, create additional output plots.
    gui : bool
        If True, this function is executed from GUI and returns figures.
    queue : bool, dict
        If gui is True and non_interactive is False, queue is a dict with an in and output queue to communicate with GUI thread.
    '''
    logging.info('=== Pre-alignment ===')
    # Option sanity: the Hough transform only works on background-reduced data,
    # and SVD background reduction makes the gaussian background fit pointless.
    if no_fit:
        if not reduce_background:
            logging.warning("no_fit is True, setting reduce_background to True")
            reduce_background = True
    if reduce_background:
        if fit_background:
            logging.warning("reduce_background is True, setting fit_background to False")
            fit_background = False
    if plot is True and not gui:
        output_pdf = PdfPages(os.path.splitext(output_alignment_file)[0] + '_prealigned.pdf', keep_empty=False)
    else:
        output_pdf = None
    figs = [] if gui else None
    with tb.open_file(input_correlation_file, mode="r") as in_file_h5:
        n_duts = len(in_file_h5.list_nodes("/")) // 2 + 1  # no correlation for reference DUT0 (2 nodes per DUT: column + row)
        result = np.zeros(shape=(n_duts,), dtype=[('DUT', np.uint8), ('column_c0', np.float), ('column_c0_error', np.float), ('column_c1', np.float), ('column_c1_error', np.float), ('column_sigma', np.float), ('column_sigma_error', np.float), ('row_c0', np.float), ('row_c0_error', np.float), ('row_c1', np.float), ('row_c1_error', np.float), ('row_sigma', np.float), ('row_sigma_error', np.float), ('z', np.float)])
        # Set std. settings for reference DUT0: identity transformation (offset 0, slope 1)
        result[0]['column_c0'], result[0]['column_c0_error'] = 0.0, 0.0
        result[0]['column_c1'], result[0]['column_c1_error'] = 1.0, 0.0
        result[0]['row_c0'], result[0]['row_c0_error'] = 0.0, 0.0
        result[0]['row_c1'], result[0]['row_c1_error'] = 1.0, 0.0
        result[0]['z'] = z_positions[0]
        for node in in_file_h5.root:
            # Node names encode the DUT pair, e.g. 'CorrelationColumn_1_0' -> dut_idx=1, ref_idx=0
            table_prefix = 'column' if 'column' in node.name.lower() else 'row'
            indices = re.findall(r'\d+', node.name)
            dut_idx = int(indices[0])
            ref_idx = int(indices[1])
            result[dut_idx]['DUT'] = dut_idx
            dut_name = dut_names[dut_idx] if dut_names else ("DUT" + str(dut_idx))
            ref_name = dut_names[ref_idx] if dut_names else ("DUT" + str(ref_idx))
            logging.info('Aligning data from %s', node.name)
            if "column" in node.name.lower():
                pixel_size_dut, pixel_size_ref = pixel_size[dut_idx][0], pixel_size[ref_idx][0]
            else:
                pixel_size_dut, pixel_size_ref = pixel_size[dut_idx][1], pixel_size[ref_idx][1]
            data = node[:]
            n_pixel_dut, n_pixel_ref = data.shape[0], data.shape[1]
            # Initialize arrays with np.nan (invalid), adding 0.5 to change from index to position
            # matrix index 0 is cluster index 1 ranging from 0.5 to 1.4999, which becomes position 0.0 to 0.999 with center at 0.5, etc.
            x_ref = (np.linspace(0.0, n_pixel_ref, num=n_pixel_ref, endpoint=False, dtype=np.float) + 0.5)
            x_dut = (np.linspace(0.0, n_pixel_dut, num=n_pixel_dut, endpoint=False, dtype=np.float) + 0.5)
            coeff_fitted = [None] * n_pixel_dut
            mean_fitted = np.empty(shape=(n_pixel_dut,), dtype=np.float)  # Peak of the Gauss fit
            mean_fitted.fill(np.nan)
            mean_error_fitted = np.empty(shape=(n_pixel_dut,), dtype=np.float)  # Error of the fit of the peak
            mean_error_fitted.fill(np.nan)
            sigma_fitted = np.empty(shape=(n_pixel_dut,), dtype=np.float)  # Sigma of the Gauss fit
            sigma_fitted.fill(np.nan)
            chi2 = np.empty(shape=(n_pixel_dut,), dtype=np.float)  # Chi2 of the fit
            chi2.fill(np.nan)
            n_cluster = np.sum(data, axis=1)  # Number of hits per bin
            if reduce_background:
                uu, dd, vv = np.linalg.svd(data)  # singular value decomposition
                background = np.matrix(uu[:, :1]) * np.diag(dd[:1]) * np.matrix(vv[:1, :])  # take first singular value for background
                background = np.array(background, dtype=np.int32)  # make Numpy array
                data = (data - background).astype(np.int32)  # remove background
                data -= data.min()  # only positive values
            if no_fit:
                # Hough-transform based straight line extraction instead of per-slice gaussian fits
                # calculate half height
                median = np.median(data)
                median_max = np.median(np.max(data, axis=1))
                half_median_data = (data > ((median + median_max) / 2))
                # calculate maximum per column
                max_select = np.argmax(data, axis=1)
                hough_data = np.zeros_like(data)
                hough_data[np.arange(data.shape[0]), max_select] = 1
                # select maximums if larger than half height
                hough_data = hough_data & half_median_data
                # transpose for correct angle
                hough_data = hough_data.T
                accumulator, theta, rho, theta_edges, rho_edges = analysis_utils.hough_transform(hough_data, theta_res=0.1, rho_res=1.0, return_edges=True)
                rho_idx, th_idx = np.unravel_index(accumulator.argmax(), accumulator.shape)
                rho_val, theta_val = rho[rho_idx], theta[th_idx]
                # Convert the best (rho, theta) line back to slope/offset in index space ...
                slope_idx, offset_idx = -np.cos(theta_val) / np.sin(theta_val), rho_val / np.sin(theta_val)
                # ... and scale to metric units
                slope = slope_idx * (pixel_size_ref / pixel_size_dut)
                offset = offset_idx * pixel_size_ref
                # offset in the center of the pixel matrix
                offset_center = offset + slope * pixel_size_dut * n_pixel_dut * 0.5 - pixel_size_ref * n_pixel_ref * 0.5
                offset_center += 0.5 * pixel_size_ref - slope * 0.5 * pixel_size_dut  # correct for half bin
                result[dut_idx][table_prefix + '_c0'], result[dut_idx][table_prefix + '_c0_error'] = offset_center, 0.0
                result[dut_idx][table_prefix + '_c1'], result[dut_idx][table_prefix + '_c1_error'] = slope, 0.0
                result[dut_idx][table_prefix + '_sigma'], result[dut_idx][table_prefix + '_sigma_error'] = 0.0, 0.0
                result[dut_idx]['z'] = z_positions[dut_idx]
                plot_utils.plot_hough(x=x_dut,
                                      data=hough_data,
                                      accumulator=accumulator,
                                      offset=offset_idx,
                                      slope=slope_idx,
                                      theta_edges=theta_edges,
                                      rho_edges=rho_edges,
                                      n_pixel_ref=n_pixel_ref,
                                      n_pixel_dut=n_pixel_dut,
                                      pixel_size_ref=pixel_size_ref,
                                      pixel_size_dut=pixel_size_dut,
                                      ref_name=ref_name,
                                      dut_name=dut_name,
                                      prefix=table_prefix,
                                      output_pdf=output_pdf,
                                      gui=gui,
                                      figs=figs)
            else:
                # fill the arrays from above with values
                _fit_data(x=x_ref, data=data, s_n=s_n, coeff_fitted=coeff_fitted, mean_fitted=mean_fitted, mean_error_fitted=mean_error_fitted, sigma_fitted=sigma_fitted, chi2=chi2, fit_background=fit_background, reduce_background=reduce_background)
                # Convert fit results to metric units for alignment fit
                # Origin is center of pixel matrix
                x_dut_scaled = (x_dut - 0.5 * n_pixel_dut) * pixel_size_dut
                mean_fitted_scaled = (mean_fitted - 0.5 * n_pixel_ref) * pixel_size_ref
                mean_error_fitted_scaled = mean_error_fitted * pixel_size_ref
                # Selected data arrays (copies that shrink with each user/automatic selection)
                x_selected = x_dut.copy()
                x_dut_scaled_selected = x_dut_scaled.copy()
                mean_fitted_scaled_selected = mean_fitted_scaled.copy()
                mean_error_fitted_scaled_selected = mean_error_fitted_scaled.copy()
                sigma_fitted_selected = sigma_fitted.copy()
                chi2_selected = chi2.copy()
                n_cluster_selected = n_cluster.copy()
                # Show the straight line correlation fit including fit errors and offsets from the fit
                # Let the user change the cuts (error limit, offset limit) and refit until result looks good
                refit = True
                selected_data = np.ones_like(x_dut, dtype=np.bool)
                actual_iteration = 0  # Refit counter for non interactive mode
                while refit:
                    if gui and not non_interactive:
                        # Put data in queue to be processed interactively on GUI thread
                        queue['in'].put([x_dut_scaled_selected, mean_fitted_scaled_selected,
                                         mean_error_fitted_scaled_selected, n_cluster_selected,
                                         ref_name, dut_name, table_prefix])
                        # Blocking statement to wait for processed data from GUI thread
                        selected_data, fit, refit = queue['out'].get()
                    else:
                        selected_data, fit, refit = plot_utils.plot_prealignments(x=x_dut_scaled_selected,
                                                                                  mean_fitted=mean_fitted_scaled_selected,
                                                                                  mean_error_fitted=mean_error_fitted_scaled_selected,
                                                                                  n_cluster=n_cluster_selected,
                                                                                  ref_name=ref_name,
                                                                                  dut_name=dut_name,
                                                                                  prefix=table_prefix,
                                                                                  non_interactive=non_interactive)
                    # Apply the boolean selection mask returned above to all working arrays
                    x_selected = x_selected[selected_data]
                    x_dut_scaled_selected = x_dut_scaled_selected[selected_data]
                    mean_fitted_scaled_selected = mean_fitted_scaled_selected[selected_data]
                    mean_error_fitted_scaled_selected = mean_error_fitted_scaled_selected[selected_data]
                    sigma_fitted_selected = sigma_fitted_selected[selected_data]
                    chi2_selected = chi2_selected[selected_data]
                    n_cluster_selected = n_cluster_selected[selected_data]
                    # Stop in non interactive mode if the number of refits (iterations) is reached
                    if non_interactive:
                        actual_iteration += 1
                        if actual_iteration >= iterations:
                            break
                # Linear fit, usually describes correlation very well, slope is close to 1.
                # With low energy beam and / or beam with diverse angular distribution, the correlation will not be perfectly straight
                # Use results from straight line fit as start values for this final fit
                re_fit, re_fit_pcov = curve_fit(analysis_utils.linear, x_dut_scaled_selected, mean_fitted_scaled_selected, sigma=mean_error_fitted_scaled_selected, absolute_sigma=True, p0=[fit[0], fit[1]])
                # Write fit results to array
                result[dut_idx][table_prefix + '_c0'], result[dut_idx][table_prefix + '_c0_error'] = re_fit[0], np.absolute(re_fit_pcov[0][0]) ** 0.5
                result[dut_idx][table_prefix + '_c1'], result[dut_idx][table_prefix + '_c1_error'] = re_fit[1], np.absolute(re_fit_pcov[1][1]) ** 0.5
                result[dut_idx]['z'] = z_positions[dut_idx]
                # Calculate mean sigma (is a residual when assuming straight tracks) and its error and store the actual data in result array
                # This error is needed for track finding and track quality determination
                mean_sigma = pixel_size_ref * np.mean(np.array(sigma_fitted_selected))
                mean_sigma_error = pixel_size_ref * np.std(np.array(sigma_fitted_selected)) / np.sqrt(np.array(sigma_fitted_selected).shape[0])
                result[dut_idx][table_prefix + '_sigma'], result[dut_idx][table_prefix + '_sigma_error'] = mean_sigma, mean_sigma_error
                # Calculate the index of the beam center based on valid indices
                plot_index = np.average(x_selected - 1, weights=np.sum(data, axis=1)[np.array(x_selected - 1, dtype=np.int32)])
                # Find nearest valid index to the calculated index
                idx = (np.abs(x_selected - 1 - plot_index)).argmin()
                plot_index = np.array(x_selected - 1, dtype=np.int32)[idx]
                x_fit = np.linspace(start=x_ref.min(), stop=x_ref.max(), num=500, endpoint=True)
                # Walk outwards from the beam center, alternating above/below, until a slice with a converged fit is found
                indices_lower = np.arange(plot_index)
                indices_higher = np.arange(plot_index, n_pixel_dut)
                alternating_indices = np.vstack((np.hstack([indices_higher, indices_lower[::-1]]), np.hstack([indices_lower[::-1], indices_higher]))).reshape((-1,), order='F')
                unique_indices = np.unique(alternating_indices, return_index=True)[1]
                alternating_indices = alternating_indices[np.sort(unique_indices)]
                for plot_index in alternating_indices:
                    plot_correlation_fit = False
                    if coeff_fitted[plot_index] is not None:
                        plot_correlation_fit = True
                        break
                if plot_correlation_fit:
                    # coeff slots 3:6 are NaN when the background gauss was not fitted (gauss + offset only)
                    if np.all(np.isnan(coeff_fitted[plot_index][3:6])):
                        y_fit = analysis_utils.gauss_offset(x_fit, *coeff_fitted[plot_index][[0, 1, 2, 6]])
                        fit_label = "Gauss-Offset"
                    else:
                        y_fit = analysis_utils.double_gauss_offset(x_fit, *coeff_fitted[plot_index])
                        fit_label = "Gauss-Gauss-Offset"
                    plot_utils.plot_correlation_fit(x=x_ref,
                                                    y=data[plot_index, :],
                                                    x_fit=x_fit,
                                                    y_fit=y_fit,
                                                    xlabel='%s %s' % ("Column" if "column" in node.name.lower() else "Row", ref_name),
                                                    fit_label=fit_label,
                                                    title="Correlation of %s: %s vs. %s at %s %d" % (table_prefix + "s", ref_name, dut_name, table_prefix, plot_index),
                                                    output_pdf=output_pdf,
                                                    gui=gui,
                                                    figs=figs)
                else:
                    logging.warning("Cannot plot correlation fit, no fit data available")
                # Plot selected data with fit
                fit_fn = np.poly1d(re_fit[::-1])
                selected_indices = np.searchsorted(x_dut_scaled, x_dut_scaled_selected)
                mask = np.zeros_like(x_dut_scaled, dtype=np.bool)
                mask[selected_indices] = True
                plot_utils.plot_prealignment_fit(x=x_dut_scaled,
                                                 mean_fitted=mean_fitted_scaled,
                                                 mask=mask,
                                                 fit_fn=fit_fn,
                                                 fit=re_fit,
                                                 pcov=re_fit_pcov,
                                                 chi2=chi2,
                                                 mean_error_fitted=mean_error_fitted_scaled,
                                                 n_cluster=n_cluster,
                                                 n_pixel_ref=n_pixel_ref,
                                                 n_pixel_dut=n_pixel_dut,
                                                 pixel_size_ref=pixel_size_ref,
                                                 pixel_size_dut=pixel_size_dut,
                                                 ref_name=ref_name,
                                                 dut_name=dut_name,
                                                 prefix=table_prefix,
                                                 output_pdf=output_pdf,
                                                 gui=gui,
                                                 figs=figs)
        if gui and not non_interactive:
            queue['in'].put([None])  # Put random element in queue to signal GUI thread end of interactive prealignment
    logging.info('Store pre-alignment data in %s', output_alignment_file)
    with tb.open_file(output_alignment_file, mode="w") as out_file_h5:
        try:
            result_table = out_file_h5.create_table(out_file_h5.root, name='PreAlignment', description=result.dtype, title='Prealignment alignment from correlation', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            result_table.append(result)
        except tb.exceptions.NodeError:
            logging.warning('Coarse alignment table exists already. Do not create new.')
    if output_pdf is not None:
        output_pdf.close()
    if gui:
        return figs
def _fit_data(x, data, s_n, coeff_fitted, mean_fitted, mean_error_fitted, sigma_fitted, chi2, fit_background, reduce_background):
    ''' Fit every slice of the 2D correlation histogram with a gaussian (plus optional
    gaussian background and offset) and fill the given result arrays IN PLACE.

    Parameters
    ----------
    x : array
        Bin center positions along the reference axis.
    data : 2D array
        Correlation histogram, shape (n_pixel_dut, n_pixel_ref); data[index, :] is fitted per index.
    s_n : float
        Signal-to-noise requirement used in the sanity check of the background fit.
    coeff_fitted, mean_fitted, mean_error_fitted, sigma_fitted, chi2 : arrays/lists
        Output containers (one entry per DUT index), modified in place.
    fit_background : bool
        If True, model the background with an additional gaussian; otherwise offset only.
    reduce_background : bool
        If True, do not reuse the previous fit as start values (background already removed by SVD).
    '''
    def calc_limits_from_fit(x, coeff):
        ''' Calculates the fit limits from the last successful fit.'''
        # Parameter order: A_1, mu_1, sigma_1, A_2, mu_2, sigma_2, offset
        limits = [
            [0.1 * coeff[0], x.min(), 0.5 * coeff[2], 0.01 * coeff[3], x.min(), 0.5 * coeff[5], 0.5 * coeff[6]],
            [10.0 * coeff[0], x.max(), 2.0 * coeff[2], 10.0 * coeff[3], x.max(), 2.0 * coeff[5], 2.0 * coeff[6]]
        ]
        # Fix too small sigma, sigma < 1 is unphysical
        if limits[1][2] < 1.:
            limits[1][2] = 10.
        return limits
    def signal_sanity_check(coeff, s_n, A_peak):
        ''' Sanity check if signal was deducted correctly from background.
        3 Conditions:
        1. The given signal to noise value has to be fulfilled: S/N > Amplitude Signal / ( Amplitude background + Offset)
        2. The signal + background has to be large enough: Amplitude 1 + Amplitude 2 + Offset > Data maximum / 2
        3. The Signal Sigma has to be smaller than the background sigma, otherwise beam would be larger than one pixel pitch
        '''
        if coeff[0] < (coeff[3] + coeff[6]) * s_n or coeff[0] + coeff[3] + coeff[6] < A_peak / 2.0 or coeff[2] > coeff[5] / 2.0:
            return False
        return True
    n_pixel_dut, n_pixel_ref = data.shape[0], data.shape[1]
    # Start values for fitting
    # Correlation peak
    mu_peak = x[np.argmax(data, axis=1)]
    A_peak = np.max(data, axis=1)  # signal / correlation peak
    # Background of uncorrelated data
    n_entries = np.sum(data, axis=1)
    A_background = np.mean(data, axis=1)  # noise / background halo
    mu_background = np.zeros_like(n_entries)
    mu_background[n_entries > 0] = np.average(data, axis=1, weights=x)[n_entries > 0] * np.sum(x) / n_entries[n_entries > 0]
    coeff = None
    fit_converged = False  # To signal that last fit was good, thus the results can be taken as start values for next fit
    # for logging
    no_correlation_indices = []
    few_correlation_indices = []
    # get index of the highest background value
    fit_start_index = np.argmax(A_background)
    # Process slices starting at the beam center and walking outwards (downwards first, then upwards),
    # so a converged neighbour fit is available as start values for the next slice
    indices_lower = np.arange(fit_start_index)[::-1]
    indices_higher = np.arange(fit_start_index, n_pixel_dut)
    stacked_indices = np.hstack([indices_lower, indices_higher])
    for index in stacked_indices:  # Loop over x dimension of correlation histogram
        if index == fit_start_index:
            # Switching from the downward to the upward sweep: restore start values
            # from the direct lower neighbour instead of the far-away last lower index
            if index > 0 and coeff_fitted[index - 1] is not None:
                coeff = coeff_fitted[index - 1]
                fit_converged = True
            else:
                fit_converged = False
        # TODO: start fitting from the beam center to get a higher chance to pick up the correlation peak
        # omit correlation fit with no entries / correlation (e.g. sensor edges, masked columns)
        if np.all(data[index, :] == 0):
            no_correlation_indices.append(index)
            continue
        # omit correlation fit if sum of correlation entries is < 1 % of total entries divided by number of indices
        # (e.g. columns not in the beam)
        n_cluster_curr_index = data[index, :].sum()
        if fit_converged and n_cluster_curr_index < data.sum() / n_pixel_dut * 0.01:
            few_correlation_indices.append(index)
            continue
        # Set start parameters and fit limits
        # Parameters: A_1, mu_1, sigma_1, A_2, mu_2, sigma_2, offset
        if fit_converged and not reduce_background:  # Set start values from last successful fit, no large difference expected
            p0 = coeff  # Set start values from last successful fit
            bounds = calc_limits_from_fit(x, coeff)  # Set boundaries from previous converged fit
        else:  # No (last) successful fit, try to deduce reasonable start values
            p0 = [A_peak[index], mu_peak[index], 5.0, A_background[index], mu_background[index], analysis_utils.get_rms_from_histogram(data[index, :], x), 0.0]
            bounds = [[0.0, x.min(), 0.0, 0.0, x.min(), 0.0, 0.0], [2.0 * A_peak[index], x.max(), x.max() - x.min(), 2.0 * A_peak[index], x.max(), np.inf, A_peak[index]]]
        # Fit correlation
        if fit_background:  # Describe background with additional gauss + offset
            try:
                coeff, var_matrix = curve_fit(analysis_utils.double_gauss_offset, x, data[index, :], p0=p0, bounds=bounds)
            except RuntimeError:  # curve_fit failed
                fit_converged = False
            else:
                fit_converged = True
                # do some result checks
                if not signal_sanity_check(coeff, s_n, A_peak[index]):
                    logging.debug('No correlation peak found. Try another fit...')
                    # Use parameters from last fit as start parameters for the refit
                    y_fit = analysis_utils.double_gauss_offset(x, *coeff)
                    try:
                        coeff, var_matrix = refit_advanced(x_data=x, y_data=data[index, :], y_fit=y_fit, p0=coeff)
                    except RuntimeError:  # curve_fit failed
                        fit_converged = False
                    else:
                        fit_converged = True
                        # Check result again:
                        if not signal_sanity_check(coeff, s_n, A_peak[index]):
                            logging.debug('No correlation peak found after refit!')
                            fit_converged = False
        else:  # Describe background with offset only.
            # Change start parameters and boundaries: keep only A_1, mu_1, sigma_1, offset
            p0_gauss_offset = [p0_val for i, p0_val in enumerate(p0) if i in (0, 1, 2, 6)]
            bounds_gauss_offset = [0, np.inf]
            bounds_gauss_offset[0] = [bound_val for i, bound_val in enumerate(bounds[0]) if i in (0, 1, 2, 6)]
            bounds_gauss_offset[1] = [bound_val for i, bound_val in enumerate(bounds[1]) if i in (0, 1, 2, 6)]
            try:
                coeff_gauss_offset, var_matrix = curve_fit(analysis_utils.gauss_offset, x, data[index, :], p0=p0_gauss_offset, bounds=bounds_gauss_offset)
            except RuntimeError:  # curve_fit failed
                fit_converged = False
            else:
                # Correlation should have at least 2 entries to avoid random fluctuation peaks to be selected
                if coeff_gauss_offset[0] > 2:
                    fit_converged = True
                    # Change back coefficients: pad the missing background gauss with NaN
                    coeff = np.insert(coeff_gauss_offset, 3, [np.nan] * 3)  # Parameters: A_1, mu_1, sigma_1, A_2, mu_2, sigma_2, offset
                else:
                    fit_converged = False
        # Set fit results for given index if successful
        if fit_converged:
            coeff_fitted[index] = coeff
            mean_fitted[index] = coeff[1]
            mean_error_fitted[index] = np.sqrt(np.abs(np.diag(var_matrix)))[1]
            sigma_fitted[index] = np.abs(coeff[2])
            chi2[index] = analysis_utils.get_chi2(y_data=data[index, :], y_fit=analysis_utils.double_gauss_offset(x, *coeff))
    if no_correlation_indices:
        logging.info('No correlation entries for indices %s. Omit correlation fit.', str(no_correlation_indices)[1:-1])
    if few_correlation_indices:
        logging.info('Very few correlation entries for indices %s. Omit correlation fit.', str(few_correlation_indices)[1:-1])
def refit_advanced(x_data, y_data, y_fit, p0):
    ''' Refit after an unsatisfying first fit: subtract the previous fit (which most
    likely describes only the background) from the data so that mainly the small
    signal peak remains, fit that peak alone, then refit the full double-gauss-offset
    model using the peak fit as improved start values.

    Raises RuntimeError if either curve_fit does not converge.
    '''
    residual = y_data - y_fit  # background-subtracted data, should contain the signal peak
    amp_start = np.max(residual)  # start value for the peak amplitude
    # NOTE(review): np.argmax returns the bin index, not the x position; as a fit
    # start value the 0.5-bin difference is presumably harmless — confirm intended.
    mu_start = np.argmax(residual)
    fwhm_low, fwhm_high = analysis_utils.fwhm(x_data, residual)
    sigma_start = (fwhm_high - fwhm_low) / 2.35  # FWHM -> sigma for a gaussian
    # Fit a gauss + offset + slope to the background-subtracted data
    peak_coeff, _ = curve_fit(analysis_utils.gauss_offset_slope, x_data, residual,
                              p0=[amp_start, mu_start, sigma_start, 0.0, 0.0],
                              bounds=([0.0, 0.0, 0.0, -10000.0, -10.0],
                                      [1.1 * amp_start, np.inf, np.inf, 10000.0, 10.0]))
    # Refit the original double gauss model: signal start values from the peak fit,
    # background and offset start values kept from the previous attempt (p0)
    start_values = [peak_coeff[0], peak_coeff[1], peak_coeff[2], p0[3], p0[4], p0[5], p0[6]]
    coeff, var_matrix = curve_fit(analysis_utils.double_gauss_offset, x_data, y_data,
                                  p0=start_values, bounds=[0.0, np.inf])
    return coeff, var_matrix
def apply_alignment(input_hit_file, input_alignment_file, output_hit_file, inverse=False,
force_prealignment=False, no_z=False, use_duts=None, chunk_size=1000000):
''' Takes a file with tables containing hit information (x, y, z) and applies the alignment to each DUT hit (positions and errors).
The alignment data is used. If this is not available a fallback to the pre-alignment is done.
One can also inverse the alignment or apply the alignment without changing the z position.
Note:
-----
This function cannot be easily made faster with multiprocessing since the computation function (apply_alignment_to_chunk) does not
contribute significantly to the runtime (< 20 %), but the copy overhead for not shared memory needed for multipgrocessing is higher.
Also the hard drive IO can be limiting (30 Mb/s read, 20 Mb/s write to the same disk)
Parameters
----------
input_hit_file : string
Filename of the input hits file (e.g. merged data file, tracklets file, etc.).
input_alignment_file : string
Filename of the input alignment file.
output_hit_file : string
Filename of the output hits file with hit data after alignment was applied.
inverse : bool
If True, apply the inverse alignment.
force_prealignment : bool
If True, use pre-alignment, even if alignment data is availale.
no_z : bool
If True, do not change the z alignment. Needed since the z position is special for x / y based plane measurements.
use_duts : iterable
Iterable of DUT indices to apply the alignment to. If None, use all DUTs.
chunk_size : uint
Chunk size of the data when reading from file.
'''
logging.info('== Apply alignment to %s ==', input_hit_file)
use_prealignment = True if force_prealignment else False
try:
with tb.open_file(input_alignment_file, mode="r") as in_file_h5: # Open file with alignment data
if use_prealignment:
logging.info('Use pre-alignment data')
prealignment = in_file_h5.root.PreAlignment[:]
n_duts = prealignment.shape[0]
else:
logging.info('Use alignment data')
alignment = in_file_h5.root.Alignment[:]
n_duts = alignment.shape[0]
except TypeError: # The input_alignment_file is an array
alignment = input_alignment_file
try: # Check if array is prealignent array
alignment['column_c0']
logging.info('Use pre-alignment data')
n_duts = prealignment.shape[0]
use_prealignment = True
except ValueError:
logging.info('Use alignment data')
n_duts = alignment.shape[0]
use_prealignment = False
def apply_alignment_to_chunk(hits_chunk, dut_index, use_prealignment, alignment, inverse, no_z):
if use_prealignment: # Apply transformation from pre-alignment information
(hits_chunk['x_dut_%d' % dut_index],
hits_chunk['y_dut_%d' % dut_index],
hit_z,
hits_chunk['xerr_dut_%d' % dut_index],
hits_chunk['yerr_dut_%d' % dut_index],
hits_chunk['zerr_dut_%d' % dut_index]) = geometry_utils.apply_alignment(
hits_x=hits_chunk['x_dut_%d' % dut_index],
hits_y=hits_chunk['y_dut_%d' % dut_index],
hits_z=hits_chunk['z_dut_%d' % dut_index],
hits_xerr=hits_chunk['xerr_dut_%d' % dut_index],
hits_yerr=hits_chunk['yerr_dut_%d' % dut_index],
hits_zerr=hits_chunk['zerr_dut_%d' % dut_index],
dut_index=dut_index,
prealignment=prealignment,
inverse=inverse)
else: # Apply transformation from fine alignment information
(hits_chunk['x_dut_%d' % dut_index],
hits_chunk['y_dut_%d' % dut_index],
hit_z,
hits_chunk['xerr_dut_%d' % dut_index],
hits_chunk['yerr_dut_%d' % dut_index],
hits_chunk['zerr_dut_%d' % dut_index]) = geometry_utils.apply_alignment(
hits_x=hits_chunk['x_dut_%d' % dut_index],
hits_y=hits_chunk['y_dut_%d' % dut_index],
hits_z=hits_chunk['z_dut_%d' % dut_index],
hits_xerr=hits_chunk['xerr_dut_%d' % dut_index],
hits_yerr=hits_chunk['yerr_dut_%d' % dut_index],
hits_zerr=hits_chunk['zerr_dut_%d' % dut_index],
dut_index=dut_index,
alignment=alignment,
inverse=inverse)
if not no_z:
hits_chunk['z_dut_%d' % dut_index] = hit_z
# Looper over the hits of all DUTs of all hit tables in chunks and apply the alignment
with tb.open_file(input_hit_file, mode='r') as in_file_h5:
with tb.open_file(output_hit_file, mode='w') as out_file_h5:
for node in in_file_h5.root: # Loop over potential hit tables in data file
hits = node
new_node_name = hits.name
if new_node_name == 'MergedCluster': # Merged cluster with alignment are tracklets
new_node_name = 'Tracklets'
hits_aligned_table = out_file_h5.create_table(out_file_h5.root, name=new_node_name, description=np.zeros((1,), dtype=hits.dtype).dtype, title=hits.title, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hits.shape[0], term_width=80)
progress_bar.start()
for hits_chunk, index in analysis_utils.data_aligned_at_events(hits, chunk_size=chunk_size): # Loop over the hits
for dut_index in range(0, n_duts): # Loop over the DUTs in the hit table
if use_duts is not None and dut_index not in use_duts: # omit DUT
continue
apply_alignment_to_chunk(hits_chunk=hits_chunk, dut_index=dut_index, use_prealignment=use_prealignment, alignment=prealignment if use_prealignment else alignment, inverse=inverse, no_z=no_z)
hits_aligned_table.append(hits_chunk)
progress_bar.update(index)
progress_bar.finish()
logging.debug('File with realigned hits %s', output_hit_file)
def alignment(input_track_candidates_file, input_alignment_file, n_pixels, pixel_size, align_duts=None, selection_fit_duts=None, selection_hit_duts=None, selection_track_quality=1, initial_rotation=None, initial_translation=None, max_iterations=10, use_n_tracks=200000, plot=False, chunk_size=100000):
    ''' This function does an alignment of the DUTs and sets translation and rotation values for all DUTs.
    The reference DUT defines the global coordinate system position at 0, 0, 0 and should be well in the beam and not heavily rotated.

    To solve the chicken-and-egg problem that a good dut alignment needs hits belonging to one track, but good track finding needs a good dut alignment this
    function work only on already prealigned hits belonging to one track. Thus this function can be called only after track finding.

    These steps are done
    1. Take the found tracks and revert the pre-alignment
    2. Take the track hits belonging to one track and fit tracks for all DUTs
    3. Calculate the residuals for each DUT
    4. Deduce rotations from the residuals and apply them to the hits
    5. Deduce the translation of each plane
    6. Store and apply the new alignment

    repeat step 3 - 6 until the total residual does not decrease (RMS_total = sqrt(RMS_x_1^2 + RMS_y_1^2 + RMS_x_2^2 + RMS_y_2^2 + ...))

    Parameters
    ----------
    input_track_candidates_file : string
        file name with the track candidates table
    input_alignment_file : pytables file
        File name of the input alignment data
    n_pixels : iterable of tuples
        One tuple per DUT describing the total number of pixels (column/row),
        e.g. for two FE-I4 DUTs [(80, 336), (80, 336)].
    pixel_size : iterable of tuples
        One tuple per DUT describing the pixel dimension (column/row),
        e.g. for two FE-I4 DUTs [(250, 50), (250, 50)].
    align_duts : iterable or iterable of iterable
        The combination of duts that are aligned at once. One should always align the high resolution planes first.
        E.g. for a telescope (first and last 3 planes) with 2 devices in the center (3, 4):
        align_duts=[[0, 1, 2, 5, 6, 7],  # align the telescope planes first
                    [4],  # Align first DUT
                    [3]],  # Align second DUT
    selection_fit_duts : iterable or iterable of iterable
        Defines for each align_duts combination which devices to use in the track fit.
        E.g. To use only the telescope planes (first and last 3 planes) but not the 2 center devices
        selection_fit_duts=[0, 1, 2, 5, 6, 7]
    selection_hit_duts : iterable or iterable of iterable
        Defines for each align_duts combination which devices must have a hit to use the track for fitting. The hit
        does not have to be used in the fit itself! This is useful for time reference planes.
        E.g. To use telescope planes (first and last 3 planes) + time reference plane (3)
        selection_hit_duts = [0, 1, 2, 4, 5, 6, 7]
    selection_track_quality : uint or iterable or iterable of iterable
        Track quality for each hit DUT.
    initial_rotation : array
        Initial rotation array.
    initial_translation : array
        Initial translation array.
    max_iterations : uint
        Maximum number of iterations of calc residuals, apply rotation refit loop until constant result is expected.
        Usually the procedure converges rather fast (< 5 iterations)
    use_n_tracks : uint
        Defines the amount of tracks to be used for the alignment. More tracks can potentially make the result
        more precise, but will also increase the calculation time.
    plot : bool
        If True, create additional output plots.
    chunk_size : uint
        Chunk size of the data when reading from file.
    '''
    logging.info('=== Aligning DUTs ===')

    # Open the pre-alignment and create empty alignment info (at the beginning only the z position is set)
    with tb.open_file(input_alignment_file, mode="r") as in_file_h5:  # Open file with alignment data
        prealignment = in_file_h5.root.PreAlignment[:]
        n_duts = prealignment.shape[0]

    alignment_parameters = _create_alignment_array(n_duts)
    # Seed the z positions from the pre-alignment; all other parameters start at zero
    alignment_parameters['translation_z'] = prealignment['z']

    # Optional user-provided start rotations: either one triple per DUT or one
    # common (alpha, beta, gamma) triple applied to all DUTs
    if initial_rotation:
        if isinstance(initial_rotation[0], Iterable):
            for dut_index in range(n_duts):
                alignment_parameters['alpha'][dut_index] = initial_rotation[dut_index][0]
                alignment_parameters['beta'][dut_index] = initial_rotation[dut_index][1]
                alignment_parameters['gamma'][dut_index] = initial_rotation[dut_index][2]
        else:
            for dut_index in range(n_duts):
                alignment_parameters['alpha'][dut_index] = initial_rotation[0]
                alignment_parameters['beta'][dut_index] = initial_rotation[1]
                alignment_parameters['gamma'][dut_index] = initial_rotation[2]

    # Optional user-provided start translations: per-DUT pairs or one common (x, y) pair
    if initial_translation:
        if isinstance(initial_translation[0], Iterable):
            for dut_index in range(n_duts):
                alignment_parameters['translation_x'][dut_index] = initial_translation[dut_index][0]
                alignment_parameters['translation_y'][dut_index] = initial_translation[dut_index][1]
        else:
            for dut_index in range(n_duts):
                alignment_parameters['translation_x'][dut_index] = initial_translation[0]
                alignment_parameters['translation_y'][dut_index] = initial_translation[1]

    if np.any(np.abs(alignment_parameters['alpha']) > np.pi / 4.) or np.any(np.abs(alignment_parameters['beta']) > np.pi / 4.) or np.any(np.abs(alignment_parameters['gamma']) > np.pi / 4.):
        logging.warning('A rotation angle > pi / 4 is not supported, you should set the correct angle and translation as a start parameter, sorry!')

    # Persist the start parameters so the iterative alignment can build on them
    geometry_utils.store_alignment_parameters(
        input_alignment_file,
        alignment_parameters=alignment_parameters,
        mode='absolute')

    # Create list with combinations of DUTs to align
    if align_duts is None:  # If None: align all DUTs
        align_duts = range(n_duts)
    # Check for value errors
    if not isinstance(align_duts, Iterable):
        raise ValueError("align_duts is no iterable")
    elif not align_duts:  # empty iterable
        raise ValueError("align_duts has no items")
    # Check if only non-iterable in iterable: normalize a flat list to one combination
    if all(map(lambda val: not isinstance(val, Iterable), align_duts)):
        align_duts = [align_duts]
    # Check if only iterable in iterable
    if not all(map(lambda val: isinstance(val, Iterable), align_duts)):
        raise ValueError("not all items in align_duts are iterable")
    # Finally check length of all iterables in iterable
    for dut in align_duts:
        if not dut:  # check the length of the items
            raise ValueError("item in align_duts has length 0")

    # Check if some DUTs will not be aligned and warn the user
    all_align_duts = []
    for duts in align_duts:
        all_align_duts.extend(duts)
    no_align_duts = set(range(n_duts)) - set(all_align_duts)
    if no_align_duts:
        logging.warning('These DUTs will not be aligned: %s', ", ".join(str(align_dut) for align_dut in no_align_duts))

    # Create track, hit selection
    if selection_hit_duts is None:  # If None: use all DUTs
        selection_hit_duts = []
        # copy each item
        for duts in align_duts:
            selection_hit_duts.append(duts[:])  # require a hit for each fit DUT
    # Check iterable and length
    if not isinstance(selection_hit_duts, Iterable):
        raise ValueError("selection_hit_duts is no iterable")
    elif not selection_hit_duts:  # empty iterable
        raise ValueError("selection_hit_duts has no items")
    # Check if only non-iterable in iterable: broadcast one flat selection to every align_duts combination
    if all(map(lambda val: not isinstance(val, Iterable), selection_hit_duts)):
        selection_hit_duts = [selection_hit_duts[:] for _ in align_duts]
    # Check if only iterable in iterable
    if not all(map(lambda val: isinstance(val, Iterable), selection_hit_duts)):
        raise ValueError("not all items in selection_hit_duts are iterable")
    # Finally check length of all arrays: one selection per align_duts combination
    if len(selection_hit_duts) != len(align_duts):
        raise ValueError("selection_hit_duts has the wrong length")
    for hit_dut in selection_hit_duts:
        if len(hit_dut) < 2:  # check the length of the items
            raise ValueError("item in selection_hit_duts has length < 2")

    # Create track, hit selection
    if selection_fit_duts is None:  # If None: use all DUTs
        selection_fit_duts = []
        # copy each item
        for hit_duts in selection_hit_duts:
            selection_fit_duts.append(hit_duts[:])  # require a hit for each fit DUT
    # Check iterable and length
    if not isinstance(selection_fit_duts, Iterable):
        raise ValueError("selection_fit_duts is no iterable")
    elif not selection_fit_duts:  # empty iterable
        raise ValueError("selection_fit_duts has no items")
    # Check if only non-iterable in iterable: broadcast one flat selection to every align_duts combination
    if all(map(lambda val: not isinstance(val, Iterable), selection_fit_duts)):
        selection_fit_duts = [selection_fit_duts[:] for _ in align_duts]
    # Check if only iterable in iterable
    if not all(map(lambda val: isinstance(val, Iterable), selection_fit_duts)):
        raise ValueError("not all items in selection_fit_duts are iterable")
    # Finally check length of all arrays: one selection per align_duts combination
    if len(selection_fit_duts) != len(align_duts):
        raise ValueError("selection_fit_duts has the wrong length")
    for index, fit_dut in enumerate(selection_fit_duts):
        if len(fit_dut) < 2:  # check the length of the items
            raise ValueError("item in selection_fit_duts has length < 2")
        if set(fit_dut) - set(selection_hit_duts[index]):  # fit DUTs are required to have a hit
            raise ValueError("DUT in selection_fit_duts is not in selection_hit_duts")

    # Create track, hit selection
    if not isinstance(selection_track_quality, Iterable):  # all items the same, special case for selection_track_quality
        selection_track_quality = [[selection_track_quality] * len(hit_duts) for hit_duts in selection_hit_duts]  # every hit DUT requires a track quality value
    # Check iterable and length
    if not isinstance(selection_track_quality, Iterable):
        raise ValueError("selection_track_quality is no iterable")
    elif not selection_track_quality:  # empty iterable
        raise ValueError("selection_track_quality has no items")
    # Check if only non-iterable in iterable: broadcast one flat quality list to every align_duts combination
    if all(map(lambda val: not isinstance(val, Iterable), selection_track_quality)):
        selection_track_quality = [selection_track_quality for _ in align_duts]
    # Check if only iterable in iterable
    if not all(map(lambda val: isinstance(val, Iterable), selection_track_quality)):
        raise ValueError("not all items in selection_track_quality are iterable")
    # Finally check length of all arrays: one quality list per align_duts combination
    if len(selection_track_quality) != len(align_duts):
        raise ValueError("selection_track_quality has the wrong length")
    for index, track_quality in enumerate(selection_track_quality):
        if len(track_quality) != len(selection_hit_duts[index]):  # check the length of each items
            raise ValueError("item in selection_track_quality and selection_hit_duts does not have the same length")

    # Loop over all combinations of DUTs to align, simplest case: use all DUTs at once to align
    # Usual case: align high resolution devices first, then other devices
    for index, actual_align_duts in enumerate(align_duts):
        logging.info('Aligning DUTs: %s', ", ".join(str(dut) for dut in actual_align_duts))
        _duts_alignment(
            track_candidates_file=input_track_candidates_file,
            alignment_file=input_alignment_file,
            alignment_index=index,
            align_duts=actual_align_duts,
            selection_fit_duts=selection_fit_duts[index],
            selection_hit_duts=selection_hit_duts[index],
            selection_track_quality=selection_track_quality[index],
            n_pixels=n_pixels,
            pixel_size=pixel_size,
            use_n_tracks=use_n_tracks,
            n_duts=n_duts,
            max_iterations=max_iterations,
            plot=plot,
            chunk_size=chunk_size)
    logging.info('Alignment finished successfully!')
def _duts_alignment(track_candidates_file, alignment_file, alignment_index, align_duts, selection_fit_duts, selection_hit_duts, selection_track_quality, n_pixels, pixel_size, use_n_tracks, n_duts, max_iterations, plot=True, chunk_size=100000):  # Called for each list of DUTs to align
    ''' Run the full alignment chain (track reduction, pre-alignment revert,
    iterative rotation/translation alignment, optional result plots) for one
    combination of DUTs. Intermediate files are removed at the end. '''
    # Step 0: Reduce the number of tracks to increase the calculation time
    logging.info('= Alignment step 0: Reduce number of tracks to %d =', use_n_tracks)
    # Build the track quality bit mask: one byte per quality level, one bit per
    # DUT; set the DUT's bit in every byte up to its required quality level
    track_quality_mask = 0
    for index, dut in enumerate(selection_hit_duts):
        for quality in range(3):
            if quality <= selection_track_quality[index]:
                track_quality_mask |= ((1 << dut) << quality * 8)
    logging.info('Use track with hits in DUTs %s', str(selection_hit_duts)[1:-1])
    data_selection.select_hits(hit_file=track_candidates_file,
                               output_file=os.path.splitext(track_candidates_file)[0] + '_reduced_%d.h5' % alignment_index,
                               max_hits=use_n_tracks,
                               track_quality=track_quality_mask,
                               track_quality_mask=track_quality_mask,
                               chunk_size=chunk_size)
    track_candidates_reduced = os.path.splitext(track_candidates_file)[0] + '_reduced_%d.h5' % alignment_index

    # Step 1: Take the found tracks and revert the pre-alignment to start alignment from the beginning
    logging.info('= Alignment step 1: Revert pre-alignment =')
    apply_alignment(input_hit_file=track_candidates_reduced,
                    input_alignment_file=alignment_file,  # Revert prealignment
                    output_hit_file=os.path.splitext(track_candidates_reduced)[0] + '_not_aligned.h5',
                    inverse=True,
                    force_prealignment=True,
                    chunk_size=chunk_size)

    # Stage N: Repeat alignment with constrained residuals until total residual does not decrease anymore
    _calculate_translation_alignment(track_candidates_file=os.path.splitext(track_candidates_reduced)[0] + '_not_aligned.h5',
                                     alignment_file=alignment_file,
                                     fit_duts=align_duts,
                                     selection_fit_duts=selection_fit_duts,
                                     selection_hit_duts=selection_hit_duts,
                                     selection_track_quality=selection_track_quality,
                                     n_pixels=n_pixels,
                                     pixel_size=pixel_size,
                                     n_duts=n_duts,
                                     max_iterations=max_iterations,
                                     plot_title_prefix='',
                                     output_pdf=None,
                                     chunk_size=chunk_size)

    # Plot final result
    if plot:
        logging.info('= Alignment step 7: Plot final result =')
        with PdfPages(os.path.join(os.path.dirname(os.path.realpath(track_candidates_file)), 'Alignment_%d.pdf' % alignment_index), keep_empty=False) as output_pdf:
            # Apply final alignment result
            apply_alignment(input_hit_file=os.path.splitext(track_candidates_reduced)[0] + '_not_aligned.h5',
                            input_alignment_file=alignment_file,
                            output_hit_file=os.path.splitext(track_candidates_file)[0] + '_final_tmp_%s.h5' % alignment_index,
                            chunk_size=chunk_size)
            fit_tracks(input_track_candidates_file=os.path.splitext(track_candidates_file)[0] + '_final_tmp_%d.h5' % alignment_index,
                       input_alignment_file=alignment_file,
                       output_tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_final_tmp_%d.h5' % alignment_index,
                       fit_duts=align_duts,  # Only create residuals of selected DUTs
                       selection_fit_duts=selection_fit_duts,  # Only use selected duts
                       selection_hit_duts=selection_hit_duts,
                       exclude_dut_hit=True,  # For unconstrained residuals
                       selection_track_quality=selection_track_quality,
                       chunk_size=chunk_size)
            calculate_residuals(input_tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_final_tmp_%d.h5' % alignment_index,
                                input_alignment_file=alignment_file,
                                output_residuals_file=os.path.splitext(track_candidates_file)[0] + '_residuals_final_tmp_%d.h5' % alignment_index,
                                n_pixels=n_pixels,
                                pixel_size=pixel_size,
                                plot=plot,
                                chunk_size=chunk_size)
        # Remove the temporary files created for the final plots
        os.remove(os.path.splitext(track_candidates_file)[0] + '_final_tmp_%d.h5' % alignment_index)
        os.remove(os.path.splitext(track_candidates_file)[0] + '_tracks_final_tmp_%d.h5' % alignment_index)
        os.remove(os.path.splitext(track_candidates_file)[0] + '_tracks_final_tmp_%d.pdf' % alignment_index)
        os.remove(os.path.splitext(track_candidates_file)[0] + '_residuals_final_tmp_%d.h5' % alignment_index)
    # Remove the intermediate files created unconditionally in steps 0 / 1
    os.remove(os.path.splitext(track_candidates_reduced)[0] + '_not_aligned.h5')
    os.remove(os.path.splitext(track_candidates_file)[0] + '_reduced_%d.h5' % alignment_index)
def _calculate_translation_alignment(track_candidates_file, alignment_file, fit_duts, selection_fit_duts, selection_hit_duts, selection_track_quality, n_pixels, pixel_size, n_duts, max_iterations, plot_title_prefix='', output_pdf=None, chunk_size=100000):
    ''' Main function that fits tracks, calculates the residuals, deduces rotation and translation values from the residuals
    and applies the new alignment to the track hits. The alignment result is scored as a combined
    residual value of all planes that are being aligned in x and y weighted by the pixel pitch in x and y. '''
    with tb.open_file(alignment_file, mode="r") as in_file_h5:  # Open file with alignment data
        alignment_last_iteration = in_file_h5.root.Alignment[:]

    total_residual = None  # Score of the previous iteration; None on the first pass
    for iteration in range(max_iterations):
        # NOTE(review): this branch is unreachable, range(max_iterations) never
        # yields iteration >= max_iterations; also RuntimeError does not format
        # '%d' from a second argument — kept as-is, candidate for cleanup
        if iteration >= max_iterations:
            raise RuntimeError('Did not converge to good solution in %d iterations. Increase max_iterations', iteration)

        apply_alignment(input_hit_file=track_candidates_file,  # Always apply alignment to starting file
                        input_alignment_file=alignment_file,
                        output_hit_file=os.path.splitext(track_candidates_file)[0] + '_no_align_%d_tmp.h5' % iteration,
                        inverse=False,
                        force_prealignment=False,
                        chunk_size=chunk_size)

        # Step 2: Fit tracks for all DUTs
        logging.info('= Alignment step 2 / iteration %d: Fit tracks for all DUTs =', iteration)
        fit_tracks(input_track_candidates_file=os.path.splitext(track_candidates_file)[0] + '_no_align_%d_tmp.h5' % iteration,
                   input_alignment_file=alignment_file,
                   output_tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.h5' % iteration,
                   fit_duts=fit_duts,  # Only create residuals of selected DUTs
                   selection_fit_duts=selection_fit_duts,  # Only use selected DUTs for track fit
                   selection_hit_duts=selection_hit_duts,  # Only use selected duts
                   exclude_dut_hit=False,  # For constrained residuals
                   selection_track_quality=selection_track_quality,
                   force_prealignment=False,
                   chunk_size=chunk_size)

        # Step 3: Calculate the residuals for each DUT
        logging.info('= Alignment step 3 / iteration %d: Calculate the residuals for each selected DUT =', iteration)
        calculate_residuals(input_tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.h5' % iteration,
                            input_alignment_file=alignment_file,
                            output_residuals_file=os.path.splitext(track_candidates_file)[0] + '_residuals_%d_tmp.h5' % iteration,
                            n_pixels=n_pixels,
                            pixel_size=pixel_size,
                            # smaller devices needs None, otherwise npixels_per_bin=5 and nbins_per_pixel=1 might improve the first step
                            npixels_per_bin=None,
                            nbins_per_pixel=None,
                            plot=False,
                            chunk_size=chunk_size)

        # Step 4: Deduce rotations from the residuals
        logging.info('= Alignment step 4 / iteration %d: Deduce rotations and translations from the residuals =', iteration)
        alignment_parameters_change, new_total_residual = _analyze_residuals(residuals_file=os.path.splitext(track_candidates_file)[0] + '_residuals_%d_tmp.h5' % iteration,
                                                                             fit_duts=fit_duts,
                                                                             pixel_size=pixel_size,
                                                                             n_duts=n_duts,
                                                                             translation_only=False,
                                                                             plot_title_prefix=plot_title_prefix,
                                                                             relaxation_factor=1.0,  # FIXME: good code practice: nothing hardcoded
                                                                             output_pdf=output_pdf)

        # Create actual alignment (old alignment + the actual relative change)
        new_alignment_parameters = geometry_utils.merge_alignment_parameters(
            alignment_last_iteration,
            alignment_parameters_change,
            select_duts=fit_duts,
            mode='relative')

        # FIXME: This step does not work well
        # # Step 5: Try to find better rotation by minimizing the residual in x + y for different angles
        # logging.info('= Alignment step 5 / iteration %d: Optimize alignment by minimizing residuals =', iteration)
        # new_alignment_parameters, new_total_residual = _optimize_alignment(tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.h5' % iteration,
        #                                                                    alignment_last_iteration=alignment_last_iteration,
        #                                                                    new_alignment_parameters=new_alignment_parameters,
        #                                                                    pixel_size=pixel_size)

        # Delete not needed files
        os.remove(os.path.splitext(track_candidates_file)[0] + '_no_align_%d_tmp.h5' % iteration)
        os.remove(os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.h5' % iteration)
        os.remove(os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.pdf' % iteration)
        os.remove(os.path.splitext(track_candidates_file)[0] + '_residuals_%d_tmp.h5' % iteration)

        logging.info('Total residual %1.4e', new_total_residual)
        if total_residual is not None and new_total_residual > total_residual:  # True if actual alignment is worse than the alignment from last iteration
            logging.info('!! Best alignment found !!')
            logging.info('= Alignment step 6 / iteration %d: Use rotation / translation information from previous iteration =', iteration)
            geometry_utils.store_alignment_parameters(alignment_file,  # Store alignment from last iteration
                                                      alignment_last_iteration,
                                                      mode='absolute',
                                                      select_duts=fit_duts)
            return
        else:
            total_residual = new_total_residual

        # Keep this iteration's result as the reference for the next iteration
        alignment_last_iteration = new_alignment_parameters.copy()  # in_file_h5.root.Alignment[:]
        logging.info('= Alignment step 6 / iteration %d: Set new rotation / translation information in alignment file =', iteration)
        geometry_utils.store_alignment_parameters(alignment_file,
                                                  new_alignment_parameters,
                                                  mode='absolute',
                                                  select_duts=fit_duts)
# Helper functions for the alignment. Not to be used directly.
def _create_alignment_array(n_duts):
# Result Translation / rotation table
description = [('DUT', np.int32)]
description.append(('translation_x', np.float))
description.append(('translation_y', np.float))
description.append(('translation_z', np.float))
description.append(('alpha', np.float))
description.append(('beta', np.float))
description.append(('gamma', np.float))
description.append(('correlation_x', np.float))
description.append(('correlation_y', np.float))
array = np.zeros((n_duts,), dtype=description)
array[:]['DUT'] = np.array(range(n_duts))
return array
def _analyze_residuals(residuals_file, fit_duts, pixel_size, n_duts, translation_only=False, relaxation_factor=1.0, plot_title_prefix='', output_pdf=None):
    ''' Take the residual plots and deduce rotation and translation angles from them '''
    alignment_parameters = _create_alignment_array(n_duts)
    total_residual = 0  # Sum of all residuals to judge the overall alignment

    with tb.open_file(residuals_file) as in_file_h5:
        for dut_index in fit_duts:
            alignment_parameters[dut_index]['DUT'] = dut_index
            # Global residuals
            hist_node = in_file_h5.get_node('/ResidualsX_DUT%d' % dut_index)
            std_x = hist_node._v_attrs.fit_coeff[2]  # sigma of the Gaussian fit of the x residual

            # Add residual to total residual normalized to pixel pitch in x
            total_residual = np.sqrt(np.square(total_residual) + np.square(std_x / pixel_size[dut_index][0]))

            if output_pdf is not None:
                plot_utils.plot_residuals(histogram=hist_node[:],
                                          edges=hist_node._v_attrs.xedges,
                                          fit=hist_node._v_attrs.fit_coeff,
                                          fit_errors=hist_node._v_attrs.fit_cov,
                                          title='Residuals for DUT%d' % dut_index,
                                          x_label='X residual [um]',
                                          output_pdf=output_pdf)

            hist_node = in_file_h5.get_node('/ResidualsY_DUT%d' % dut_index)
            std_y = hist_node._v_attrs.fit_coeff[2]  # sigma of the Gaussian fit of the y residual

            # Add residual to total residual normalized to pixel pitch in y
            total_residual = np.sqrt(np.square(total_residual) + np.square(std_y / pixel_size[dut_index][1]))

            # NOTE(review): in translation_only mode this returns during the
            # first DUT iteration, before any translation values are filled in
            # — looks suspicious, confirm against callers before relying on it
            if translation_only:
                return alignment_parameters, total_residual

            if output_pdf is not None:
                plot_utils.plot_residuals(histogram=hist_node[:],
                                          edges=hist_node._v_attrs.xedges,
                                          fit=hist_node._v_attrs.fit_coeff,
                                          fit_errors=hist_node._v_attrs.fit_cov,
                                          title='Residuals for DUT%d' % dut_index,
                                          x_label='Y residual [um]',
                                          output_pdf=output_pdf)

            # use offset at origin of sensor (center of sensor) to calculate x and y correction
            # do not use mean/median of 1D residual since it depends on the beam spot position when the device is rotated
            mu_x = in_file_h5.get_node_attr('/YResidualsX_DUT%d' % dut_index, 'fit_coeff')[0]
            mu_y = in_file_h5.get_node_attr('/XResidualsY_DUT%d' % dut_index, 'fit_coeff')[0]

            # use slope to calculate alpha, beta and gamma
            m_xx = in_file_h5.get_node_attr('/XResidualsX_DUT%d' % dut_index, 'fit_coeff')[1]
            m_yy = in_file_h5.get_node_attr('/YResidualsY_DUT%d' % dut_index, 'fit_coeff')[1]
            m_xy = in_file_h5.get_node_attr('/XResidualsY_DUT%d' % dut_index, 'fit_coeff')[1]
            m_yx = in_file_h5.get_node_attr('/YResidualsX_DUT%d' % dut_index, 'fit_coeff')[1]

            alpha, beta, gamma = analysis_utils.get_rotation_from_residual_fit(m_xx=m_xx, m_xy=m_xy, m_yx=m_yx, m_yy=m_yy)

            alignment_parameters[dut_index]['correlation_x'] = std_x
            alignment_parameters[dut_index]['translation_x'] = -mu_x  # correction is the negative of the measured offset
            alignment_parameters[dut_index]['correlation_y'] = std_y
            alignment_parameters[dut_index]['translation_y'] = -mu_y  # correction is the negative of the measured offset
            alignment_parameters[dut_index]['alpha'] = alpha * relaxation_factor
            alignment_parameters[dut_index]['beta'] = beta * relaxation_factor
            alignment_parameters[dut_index]['gamma'] = gamma * relaxation_factor

    return alignment_parameters, total_residual
def _optimize_alignment(tracks_file, alignment_last_iteration, new_alignment_parameters, pixel_size):
    ''' Changes the angles of a virtual plane such that the projected track intersections onto this virtual plane
    are most close to the measured hits on the real DUT at this position. Then the angles of the virtual plane
    should correspond to the real DUT angles. The distance is not weighted quadratically (RMS) but linearly since
    this leads to better results (most likely heavily scattered tracks / beam angle spread at the edges are weighted less).'''
    # Create new absolute alignment; note: alignment_result aliases (and thus
    # mutates) the new_alignment_parameters array passed in
    alignment_result = new_alignment_parameters

    def _minimize_me(align, dut_position, hit_x_local, hit_y_local, hit_z_local, pixel_size, offsets, slopes):
        ''' Objective: pitch-normalized sum of absolute x/y distances between
        track-plane intersections and measured hits. align = (alpha, beta, gamma, z[m]). '''
        # Calculate intersections with a dut plane given by alpha, beta, gamma at the dut_position in the global coordinate system
        rotation_matrix = geometry_utils.rotation_matrix(alpha=align[0],
                                                         beta=align[1],
                                                         gamma=align[2])
        basis_global = rotation_matrix.T.dot(np.eye(3))
        dut_plane_normal = basis_global[2]
        actual_dut_position = dut_position.copy()
        actual_dut_position[2] = align[3] * 1e6  # Convert z position from m to um
        intersections = geometry_utils.get_line_intersections_with_plane(line_origins=offsets,
                                                                         line_directions=slopes,
                                                                         position_plane=actual_dut_position,
                                                                         normal_plane=dut_plane_normal)

        # Transform to the local coordinate system to compare with measured hits
        transformation_matrix = geometry_utils.global_to_local_transformation_matrix(x=actual_dut_position[0],
                                                                                     y=actual_dut_position[1],
                                                                                     z=actual_dut_position[2],
                                                                                     alpha=align[0],
                                                                                     beta=align[1],
                                                                                     gamma=align[2])
        intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_transformation_matrix(x=intersections[:, 0],
                                                                                                                      y=intersections[:, 1],
                                                                                                                      z=intersections[:, 2],
                                                                                                                      transformation_matrix=transformation_matrix)

        # Cross check if transformations are correct (z == 0 in the local coordinate system)
        if not np.allclose(hit_z_local[np.isfinite(hit_z_local)], 0) or not np.allclose(intersection_z_local, 0):
            logging.error('Hit z position = %s and z intersection %s',
                          str(hit_z_local[~np.isclose(hit_z_local, 0)][:3]),
                          str(intersection_z_local[~np.isclose(intersection_z_local, 0)][:3]))
            raise RuntimeError('The transformation to the local coordinate system did not give all z = 0. Wrong alignment used?')

        return np.sum(np.abs(hit_x_local - intersection_x_local) / pixel_size[0]) + np.sum(np.abs(hit_y_local - intersection_y_local)) / pixel_size[1]
        # return np.sqrt(np.square(np.std(hit_x_local - intersection_x_local) / pixel_size[0]) + np.square(np.std(hit_y_local - intersection_y_local)) / pixel_size[1])

    with tb.open_file(tracks_file, mode='r') as in_file_h5:
        residuals_before = []  # Objective values with the start parameters, one per DUT
        residuals_after = []  # Objective values after minimization, one per DUT
        for node in in_file_h5.root:
            # DUT index is encoded as the trailing number of the node name
            actual_dut = int(re.findall(r'\d+', node.name)[-1])

            dut_position = np.array([alignment_last_iteration[actual_dut]['translation_x'], alignment_last_iteration[actual_dut]['translation_y'], alignment_last_iteration[actual_dut]['translation_z']])

            # Hits with the actual alignment
            hits = np.column_stack((node[:]['x_dut_%d' % actual_dut], node[:]['y_dut_%d' % actual_dut], node[:]['z_dut_%d' % actual_dut]))

            # Transform hits to the local coordinate system
            hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hits_x=hits[:, 0],
                                                                                   hits_y=hits[:, 1],
                                                                                   hits_z=hits[:, 2],
                                                                                   dut_index=actual_dut,
                                                                                   alignment=alignment_last_iteration,
                                                                                   inverse=True)

            # Track infos
            offsets = np.column_stack((node[:]['offset_0'], node[:]['offset_1'], node[:]['offset_2']))
            slopes = np.column_stack((node[:]['slope_0'], node[:]['slope_1'], node[:]['slope_2']))

            # Rotation start values of minimizer
            alpha = alignment_result[actual_dut]['alpha']
            beta = alignment_result[actual_dut]['beta']
            gamma = alignment_result[actual_dut]['gamma']
            z_position = alignment_result[actual_dut]['translation_z']

            # Trick to have the same order of magnitude of variation for angles and position, otherwise scipy minimizers
            # do not converge if step size of parameters is very different
            z_position_in_m = z_position / 1e6

            residual = _minimize_me(np.array([alpha, beta, gamma, z_position_in_m]),
                                    dut_position,
                                    hit_x_local,
                                    hit_y_local,
                                    hit_z_local,
                                    pixel_size[actual_dut],
                                    offsets,
                                    slopes)
            residuals_before.append(residual)
            logging.info('Optimize angles / z of DUT%d with start parameters: %1.2e, %1.2e, %1.2e Rad and z = %d um with residual %1.2e' % (actual_dut,
                                                                                                                                           alpha,
                                                                                                                                           beta,
                                                                                                                                           gamma,
                                                                                                                                           z_position_in_m * 1e6,
                                                                                                                                           residual))

            # FIXME:
            # Has to be heavily restricted otherwise converges to unphysical solutions since the scoring with residuals is not really working well
            bounds = [(alpha - 0.01, alpha + 0.01), (beta - 0.01, beta + 0.01), (gamma - 0.001, gamma + 0.001), (z_position_in_m - 10e-6, z_position_in_m + 10e-6)]

            result = minimize(fun=_minimize_me,
                              x0=np.array([alpha, beta, gamma, z_position_in_m]),  # Start values from residual fit
                              args=(dut_position, hit_x_local, hit_y_local, hit_z_local, pixel_size[actual_dut], offsets, slopes),
                              bounds=bounds,
                              method='SLSQP')

            alpha, beta, gamma, z_position_in_m = result.x

            residual = _minimize_me(result.x,
                                    dut_position,
                                    hit_x_local,
                                    hit_y_local,
                                    hit_z_local,
                                    pixel_size[actual_dut],
                                    offsets,
                                    slopes)
            residuals_after.append(residual)
            logging.info('Found angles of DUT%d with best angles: %1.2e, %1.2e, %1.2e Rad and z = %d um with residual %1.2e' % (actual_dut,
                                                                                                                                alpha,
                                                                                                                                beta,
                                                                                                                                gamma,
                                                                                                                                z_position_in_m * 1e6,
                                                                                                                                residual))

            # Store the optimized rotation / z values for this DUT
            alignment_result[actual_dut]['alpha'] = alpha
            alignment_result[actual_dut]['beta'] = beta
            alignment_result[actual_dut]['gamma'] = gamma
            alignment_result[actual_dut]['translation_z'] = z_position_in_m * 1e6  # convert z position from m to um

    total_residuals_before = np.sqrt(np.sum(np.square(np.array(residuals_before))))
    total_residuals_after = np.sqrt(np.sum(np.square(np.array(residuals_after))))
    logging.info('Reduced the total residuals in the optimization steps from %1.2e to %1.2e', total_residuals_before, total_residuals_after)
    if total_residuals_before < total_residuals_after:
        raise RuntimeError('Alignment optimization did not converge!')

    return alignment_result, total_residuals_after  # Return alignment result and total residual
# Helper functions to be called from multiple processes
def _correlate_cluster(cluster_dut_0, cluster_file, start_index, start_event_number, stop_event_number, column_correlation, row_correlation, chunk_size):
    '''Accumulate column/row correlation histograms between the reference DUT
    clusters and the clusters stored in cluster_file.

    The histograms are filled in place; the last chunk start index is returned
    together with them so the caller can resume reading where this call stopped.
    '''
    with tb.open_file(cluster_file, mode='r') as cluster_file_h5:  # cluster file of the DUT to correlate against
        chunk_iter = analysis_utils.data_aligned_at_events(
            cluster_file_h5.root.Cluster,
            start_index=start_index,
            start_event_number=start_event_number,
            stop_event_number=stop_event_number,
            chunk_size=chunk_size,
            fail_on_missing_events=False)
        # Chunk-wise loop over the cluster table; the loop variable rebinds
        # start_index so the final value is returned below.
        for cluster_chunk, start_index in chunk_iter:
            analysis_utils.correlate_cluster_on_event_number(
                data_1=cluster_dut_0,
                data_2=cluster_chunk,
                column_corr_hist=column_correlation,
                row_corr_hist=row_correlation)
    return start_index, column_correlation, row_correlation
| {
"repo_name": "SiLab-Bonn/testbeam_analysis",
"path": "testbeam_analysis/dut_alignment.py",
"copies": "1",
"size": "93557",
"license": "mit",
"hash": -7071609250347247000,
"line_mean": 63.4331955923,
"line_max": 416,
"alpha_frac": 0.5750718813,
"autogenerated": false,
"ratio": 4.026208202435771,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026043600132599027,
"num_lines": 1452
} |
__all__ = ["DWBPackageInstaller"]
from direct.p3d.PackageInstaller import PackageInstaller
from direct.gui.DirectWaitBar import DirectWaitBar
from direct.gui import DirectGuiGlobals as DGG
class DWBPackageInstaller(DirectWaitBar, PackageInstaller):
    """ This class presents a PackageInstaller that also inherits from
    DirectWaitBar, so it updates its own GUI as it downloads.
    Specify perPackage = True to make the progress bar reset for each
    package, or False (the default) to show one continuous progress
    bar for all packages.
    Specify updateText = True (the default) to update the text label
    with the name of the package or False to leave it up to you to set
    it.
    You can specify a callback function with finished = func; this
    function will be called, with one boolean parameter, when the
    download has completed. The parameter will be true on success, or
    false on failure.
    """
    def __init__(self, appRunner, parent = None, **kw):
        # Initialize the installer side first, so download state exists
        # before any GUI option callbacks can fire.
        PackageInstaller.__init__(self, appRunner)
        # DirectGUI option table: (name, default, callback).  Must be
        # declared via defineoptions() *before* DirectWaitBar.__init__.
        optiondefs = (
            ('borderWidth', (0.01, 0.01), None),
            ('relief', DGG.SUNKEN, self.setRelief),
            ('range', 1, self.setRange),
            ('barBorderWidth', (0.01, 0.01), self.setBarBorderWidth),
            ('barColor', (0.424, 0.647, 0.878, 1), self.setBarColor),
            ('barRelief', DGG.RAISED, self.setBarRelief),
            ('text', 'Starting', self.setText),
            ('text_pos', (0, -0.025), None),
            ('text_scale', 0.1, None),
            ('perPackage', False, None),
            ('updateText', True, None),
            ('finished', None, None),
            )
        self.defineoptions(kw, optiondefs)
        DirectWaitBar.__init__(self, parent, **kw)
        self.initialiseoptions(DWBPackageInstaller)
        self.updateBarStyle()
        # Hidden by default until the download begins.
        self.hide()
    def cleanup(self):
        # Tear down both base classes; mirrors destroy() below.
        PackageInstaller.cleanup(self)
        DirectWaitBar.destroy(self)
    def destroy(self):
        PackageInstaller.cleanup(self)
        DirectWaitBar.destroy(self)
    def packageStarted(self, package):
        """ This callback is made for each package between
        downloadStarted() and downloadFinished() to indicate the start
        of a new package. """
        if self['updateText']:
            self['text'] = 'Installing %s' % (package.getFormattedName())
        self.show()
    def packageProgress(self, package, progress):
        """ This callback is made repeatedly between packageStarted()
        and packageFinished() to update the current progress on the
        indicated package only.  The progress value ranges from 0
        (beginning) to 1 (complete). """
        if self['perPackage']:
            self['value'] = progress * self['range']
    def downloadProgress(self, overallProgress):
        """ This callback is made repeatedly between downloadStarted()
        and downloadFinished() to update the current progress through
        all packages.  The progress value ranges from 0 (beginning) to
        1 (complete). """
        if not self['perPackage']:
            self['value'] = overallProgress * self['range']
    def downloadFinished(self, success):
        """ This callback is made when all of the packages have been
        downloaded and installed (or there has been some failure).  If
        all packages where successfully installed, success is True.
        If there were no packages that required downloading, this
        callback will be made immediately, *without* a corresponding
        call to downloadStarted(). """
        self.hide()
        if self['finished']:
            self['finished'](success)
| {
"repo_name": "matthiascy/panda3d",
"path": "direct/src/p3d/DWBPackageInstaller.py",
"copies": "8",
"size": "3922",
"license": "bsd-3-clause",
"hash": -4932623962831850000,
"line_mean": 39.8541666667,
"line_max": 76,
"alpha_frac": 0.6070882203,
"autogenerated": false,
"ratio": 4.534104046242774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005456349206349205,
"num_lines": 96
} |
######################################## IMPORTS ##########################################
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#tensorflow stuff
import time
import math
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
#all functions related to datasets
import dataSet
######################################## GLOBAL VARIABLES ##########################################
# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float ('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 2000, 'Number of steps to run trainer.')
flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
# flags.DEFINE_integer('batch_size', 100, 'Batch size. Must divide evenly into the dataset sizes.')
flags.DEFINE_integer('batch_size', 2, 'Batch size. Must divide evenly into the dataset sizes.')
flags.DEFINE_string ('train_dir', 'data', 'Directory to put the training data.')
######################################## ACTUAL CODE ##########################################
def main(_):
    """Build the graph, train for FLAGS.max_steps batches, and periodically
    evaluate on the train / validation / test splits (TF 1.x-era API)."""
    data_sets = dataSet.getAllDataSets(FLAGS.train_dir)
    with tf.Graph().as_default(): #using default graph
        #MEMBER FUNCTIONS
        songs_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size) # Generate placeholders for the songs and labels.
        logits = inference(songs_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Build a Graph that computes predictions from the inference model.
        loss = loss_funct(logits, labels_placeholder) # Add to the Graph the Ops for loss calculation.
        train_op = training(loss, FLAGS.learning_rate) # Add to the Graph the Ops that calculate and apply gradients.
        eval_correct = evaluation(logits, labels_placeholder) # Add the Op to compare the logits to the labels during evaluation.
        #TF FUNCTION
        summary_op = tf.merge_all_summaries() # Build the summary operation based on the TF collection of Summaries.
        init = tf.initialize_all_variables() # Add the variable initializer Op.
        #saver = tf.train.Saver() # Create a saver for writing training checkpoints.the saver creates a bunch of files, so commented for now.
        sess = tf.Session() # Create a session for running Ops on the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph) # Instantiate a SummaryWriter to output summaries and the Graph.
        sess.run(init) # Run the Op to initialize the variables.
        start_time = time.time()
        # training loop.
        for step in xrange(FLAGS.max_steps):
            # Fill a feed dictionary with the data for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train, songs_placeholder, labels_placeholder)
            # Run one step of the model. The return values are the activations from the `train_op` (which is discarded) and the `loss` Op. To
            # inspect the values of your Ops or variables, you may include them in the list passed to sess.run() and the value tensors will be returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            if step % 100 == 0:
                # NOTE(review): duration covers the elapsed time since the last
                # report (~100 steps), not a single step.
                duration = time.time() - start_time
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                start_time = time.time()
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                #saver.save(sess, FLAGS.train_dir, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess, eval_correct, songs_placeholder, labels_placeholder, data_sets.train)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess, eval_correct, songs_placeholder, labels_placeholder, data_sets.validation)
                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess, eval_correct, songs_placeholder, labels_placeholder, data_sets.test)
#ENDOF MAIN()
def placeholder_inputs(batch_size):
    """Create the two feed placeholders for one batch.

    Label shape follows dataSet.ONE_HOT: (batch, NUM_CLASSES) for one-hot
    labels, a plain batch dimension for integer class indices.
    """
    songs = tf.placeholder(tf.float32, shape=(batch_size, dataSet.TOTAL_INPUTS))
    label_shape = (batch_size, dataSet.NUM_CLASSES) if dataSet.ONE_HOT else (batch_size)
    labels = tf.placeholder(tf.int32, shape=label_shape)
    return songs, labels
def inference(images, hidden1_units, hidden2_units):
    """Build the forward pass: two ReLU hidden layers plus a linear layer
    producing per-class logits.

    images: float32 tensor of shape (batch, dataSet.TOTAL_INPUTS).
    Returns the (batch, dataSet.NUM_CLASSES) logits tensor.
    """
    def _layer(scope_name, inputs, fan_in, fan_out, relu):
        # One fully connected layer: truncated-normal weights scaled by
        # 1/sqrt(fan_in), zero biases; optional ReLU inside the name scope.
        with tf.name_scope(scope_name):
            weights = tf.Variable(
                tf.truncated_normal([fan_in, fan_out],
                                    stddev=1.0 / math.sqrt(float(fan_in))),
                name='weights')
            biases = tf.Variable(tf.zeros([fan_out]), name='biases')
            pre_activation = tf.matmul(inputs, weights) + biases
            return tf.nn.relu(pre_activation) if relu else pre_activation

    hidden1 = _layer('hidden1', images, dataSet.TOTAL_INPUTS, hidden1_units, relu=True)
    hidden2 = _layer('hidden2', hidden1, hidden1_units, hidden2_units, relu=True)
    return _layer('softmax_linear', hidden2, hidden2_units, dataSet.NUM_CLASSES, relu=False)
def loss_funct(logits, labels):
    """Return the mean sparse softmax cross-entropy between logits and labels."""
    # sparse_* expects integer class indices; cast to int64 first.
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits, tf.to_int64(labels), name='xentropy')
    return tf.reduce_mean(xent, name='xentropy_mean')
def fill_feed_dict(data_set, images_pl, labels_pl):
    """Return a feed_dict mapping the two placeholders to the next
    FLAGS.batch_size examples of *data_set*."""
    songs_batch, labels_batch = data_set.next_batch(FLAGS.batch_size)
    return {images_pl: songs_batch, labels_pl: labels_batch}
def do_eval(sess, eval_correct, songs_placeholder, labels_placeholder, data_set):
    """Run one full epoch of evaluation against *data_set* and print precision@1.

    sess: session the model was trained in.
    eval_correct: per-batch correct-prediction counter from evaluation().
    songs_placeholder / labels_placeholder: the feed placeholders.
    data_set: split to evaluate, from getAllDataSets().
    """
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    # A trailing partial batch is dropped, so report the count actually seen.
    num_examples = steps_per_epoch * FLAGS.batch_size
    true_count = 0
    for _ in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, songs_placeholder, labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    precision = true_count / num_examples
    print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' % (num_examples, true_count, precision))
def training(loss, learning_rate):
    """Create the train op: plain SGD on *loss*, with a TensorBoard scalar
    summary of the loss and a non-trainable global_step counter.

    loss: scalar loss tensor from loss_funct().
    learning_rate: learning rate for gradient descent.
    Returns the op to pass to sess.run() for one training step.
    """
    # Track the loss over time in TensorBoard.
    tf.scalar_summary(loss.op.name, loss)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # minimize() applies the gradients and increments global_step in one op.
    return optimizer.minimize(loss, global_step=global_step)
def evaluation(logits, labels):
    """Count the correctly classified examples in the batch (precision@1).

    logits: float tensor (batch_size, dataSet.NUM_CLASSES).
    labels: int32 tensor (batch_size,) of true class indices.
    Returns a scalar int32 tensor: how many of the batch were predicted correctly.
    """
    # in_top_k with k=1 is True exactly where the true label has the top logit.
    hits = tf.nn.in_top_k(logits, labels, 1)
    return tf.reduce_sum(tf.cast(hits, tf.int32))
# Script entry point: tf.app.run() parses the FLAGS declared above, then
# invokes main().
if __name__ == '__main__':
    tf.app.run()
| {
"repo_name": "vberthiaume/vblandr",
"path": "src/main.py",
"copies": "1",
"size": "11011",
"license": "apache-2.0",
"hash": 6780546542251430000,
"line_mean": 55.1785714286,
"line_max": 196,
"alpha_frac": 0.6321859958,
"autogenerated": false,
"ratio": 4.1131864026895775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5245372398489577,
"avg_score": null,
"num_lines": null
} |
__all__ = ('EggObserver',)
from openmdao.util.log import LOG_DEBUG2
class EggObserver(object):
    """
    Provides a convenient API for calling an observer of egg operations.
    `observer` will be called with:
    - ``('analyze', filename, -1, -1)`` during module analysis.
    - ``('add', filename, file_fraction, byte_fraction)`` while writing files.
    - ``('copy', filename, file_fraction, byte_fraction)`` while copying files.
    - ``('extract', filename, file_fraction, byte_fraction)`` while extracting files.
    - ``('complete', egg_name, 1, 1)`` when complete.
    - ``('except', message, -1, -1)`` when an exception occurs.
    """

    def __init__(self, observer, logger):
        assert observer is None or callable(observer)
        self.observer = observer
        self.logger = logger

    def _notify(self, phase, path, file_fraction, byte_fraction):
        """
        Invoke the observer (if any); raise :exc:`RuntimeError` if the
        observer returns False.  Exceptions raised by the observer itself
        are logged and swallowed, matching the original per-method behavior.

        phase: string
            Event name passed as the observer's first argument.
        """
        if self.observer is None:
            return
        proceed = True
        try:
            proceed = self.observer(phase, path, file_fraction, byte_fraction)
        except Exception as exc:
            self.logger.log(LOG_DEBUG2, 'Exception calling observer: %s', exc)
        else:
            if not proceed:
                raise RuntimeError('Aborted by observer.')

    def _notify_quiet(self, phase, path, file_fraction, byte_fraction):
        """
        Invoke the observer (if any), logging -- but never propagating --
        errors and ignoring its return value.  Used for terminal events
        where aborting no longer makes sense.
        """
        if self.observer is not None:
            try:
                self.observer(phase, path, file_fraction, byte_fraction)
            except Exception as exc:
                self.logger.log(LOG_DEBUG2, 'Exception calling observer: %s', exc)

    def analyze(self, path):
        """
        Observe analysis of file.
        If `observer` returns False, raises :exc:`RuntimeError`.

        path: string
            Name of file being analyzed.
        """
        self.logger.log(LOG_DEBUG2, "  analyzing '%s'", path)
        self._notify('analyze', path, -1, -1)

    def add(self, path, file_fraction, byte_fraction):
        """
        Observe add of file.
        If `observer` returns False, raises :exc:`RuntimeError`.

        path: string
            Name of file being added.

        file_fraction: float
            Fraction of total files processed.

        byte_fraction: float
            Fraction of total bytes processed.
        """
        self.logger.log(LOG_DEBUG2, "  adding '%s'", path)
        self._notify('add', path, file_fraction, byte_fraction)

    def copy(self, path, file_fraction, byte_fraction):
        """
        Observe copy of file.
        If `observer` returns False, raises :exc:`RuntimeError`.

        path: string
            Name of file being copied.

        file_fraction: float
            Fraction of total files processed.

        byte_fraction: float
            Fraction of total bytes processed.
        """
        self.logger.log(LOG_DEBUG2, "  copying '%s'", path)
        self._notify('copy', path, file_fraction, byte_fraction)

    def extract(self, path, file_fraction, byte_fraction):
        """
        Observe extraction of file.
        If `observer` returns False, raises :exc:`RuntimeError`.

        path: string
            Name of file being extracted.

        file_fraction: float
            Fraction of total files processed.

        byte_fraction: float
            Fraction of total bytes processed.
        """
        self.logger.log(LOG_DEBUG2, "  extracting '%s'", path)
        self._notify('extract', path, file_fraction, byte_fraction)

    def complete(self, path):
        """
        Observe operation complete.

        path: string
            Name of file saved/loaded.
        """
        self._notify_quiet('complete', path, 1, 1)

    def exception(self, msg):
        """
        Observe exception.

        msg: string
            Exception message.
        """
        self.logger.error(msg)
        self._notify_quiet('except', msg, -1, -1)
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/OpenMDAO-Framework-dev/openmdao.util/src/openmdao/util/eggobserver.py",
"copies": "1",
"size": "4933",
"license": "mit",
"hash": 7413705992416260000,
"line_mean": 32.7876712329,
"line_max": 85,
"alpha_frac": 0.538009325,
"autogenerated": false,
"ratio": 4.65377358490566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5691782909905659,
"avg_score": null,
"num_lines": null
} |
__all__ = ['embed_download']
from ..common import *
from .bilibili import bilibili_download
from .iqiyi import iqiyi_download_by_vid
from .le import letvcloud_download_by_vu
from .netease import netease_download
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_id
from .vimeo import vimeo_download_by_id
from .yinyuetai import yinyuetai_download_by_id
from .youku import youku_download_by_vid
"""
refer to http://open.youku.com/tools
"""
youku_embed_patterns = [ 'youku\.com/v_show/id_([a-zA-Z0-9=]+)',
'player\.youku\.com/player\.php/sid/([a-zA-Z0-9=]+)/v\.swf',
'loader\.swf\?VideoIDS=([a-zA-Z0-9=]+)',
'player\.youku\.com/embed/([a-zA-Z0-9=]+)',
'YKU.Player\(\'[a-zA-Z0-9]+\',{ client_id: \'[a-zA-Z0-9]+\', vid: \'([a-zA-Z0-9]+)\''
]
"""
http://www.tudou.com/programs/view/html5embed.action?type=0&code=3LS_URGvl54&lcode=&resourceId=0_06_05_99
"""
tudou_embed_patterns = [ 'tudou\.com[a-zA-Z0-9\/\?=\&\.\;]+code=([a-zA-Z0-9_-]+)\&',
'www\.tudou\.com/v/([a-zA-Z0-9_-]+)/[^"]*v\.swf'
]
"""
refer to http://open.tudou.com/wiki/video/info
"""
tudou_api_patterns = [ ]
yinyuetai_embed_patterns = [ 'player\.yinyuetai\.com/video/swf/(\d+)' ]
iqiyi_embed_patterns = [ 'player\.video\.qiyi\.com/([^/]+)/[^/]+/[^/]+/[^/]+\.swf[^"]+tvId=(\d+)' ]
netease_embed_patterns = [ '(http://\w+\.163\.com/movie/[^\'"]+)' ]
vimeo_embed_patters = [ 'player\.vimeo\.com/video/(\d+)' ]
"""
check the share button on http://www.bilibili.com/video/av5079467/
"""
bilibili_embed_patterns = [ 'static\.hdslb\.com/miniloader\.swf.*aid=(\d+)' ]
def embed_download(url, output_dir = '.', merge = True, info_only = False ,**kwargs):
    """Scan an arbitrary page for embedded players from known sites (Youku,
    Tudou, Yinyuetai, iqiyi, NetEase, Vimeo, Bilibili) and dispatch every
    match to the corresponding extractor.

    Raises NotImplementedError when no known embed pattern matches.
    """
    page = get_content(url, headers=fake_headers)
    # The page title doubles as the output title for every embedded video.
    title = match1(page, '<title>([^<>]+)</title>')
    found = False

    # Youku / Tudou ids are de-duplicated; the other sites download per match.
    for vid in set(matchall(page, youku_embed_patterns)):
        found = True
        youku_download_by_vid(vid, title=title, output_dir=output_dir,
                              merge=merge, info_only=info_only)
    for vid in set(matchall(page, tudou_embed_patterns)):
        found = True
        tudou_download_by_id(vid, title=title, output_dir=output_dir,
                             merge=merge, info_only=info_only)
    for vid in matchall(page, yinyuetai_embed_patterns):
        found = True
        yinyuetai_download_by_id(vid, title=title, output_dir=output_dir,
                                 merge=merge, info_only=info_only)
    for vid in matchall(page, iqiyi_embed_patterns):
        found = True
        # Captures come back as (player-id, tvId); the extractor wants them swapped.
        iqiyi_download_by_vid((vid[1], vid[0]), title=title, output_dir=output_dir,
                              merge=merge, info_only=info_only)
    for movie_url in matchall(page, netease_embed_patterns):
        found = True
        netease_download(movie_url, title=title, output_dir=output_dir,
                         merge=merge, info_only=info_only)
    for vimeo_id in matchall(page, vimeo_embed_patters):
        found = True
        vimeo_download_by_id(vimeo_id, title=title, output_dir=output_dir,
                             merge=merge, info_only=info_only)
    for aid in matchall(page, bilibili_embed_patterns):
        found = True
        bilibili_download('http://www.bilibili.com/video/av%s/' % aid,
                          output_dir=output_dir, merge=merge, info_only=info_only)

    if not found:
        raise NotImplementedError(url)
site_info = "any.any"
download = embed_download
download_playlist = playlist_not_supported('any.any')
| {
"repo_name": "cnbeining/you-get",
"path": "src/you_get/extractors/embed.py",
"copies": "2",
"size": "3610",
"license": "mit",
"hash": -2116521549316964600,
"line_mean": 35.8367346939,
"line_max": 117,
"alpha_frac": 0.6210526316,
"autogenerated": false,
"ratio": 2.829153605015674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9397387937154444,
"avg_score": 0.010563659892245812,
"num_lines": 98
} |
__all__ = ['embed_download']
from ..common import *
from .iqiyi import iqiyi_download_by_vid
from .le import letvcloud_download_by_vu
from .netease import netease_download
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_id
from .vimeo import vimeo_download_by_id
from .yinyuetai import yinyuetai_download_by_id
from .youku import youku_download_by_vid
"""
refer to http://open.youku.com/tools
"""
youku_embed_patterns = [ 'youku\.com/v_show/id_([a-zA-Z0-9=]+)',
'player\.youku\.com/player\.php/sid/([a-zA-Z0-9=]+)/v\.swf',
'loader\.swf\?VideoIDS=([a-zA-Z0-9=]+)',
'player\.youku\.com/embed/([a-zA-Z0-9=]+)',
'YKU.Player\(\'[a-zA-Z0-9]+\',{ client_id: \'[a-zA-Z0-9]+\', vid: \'([a-zA-Z0-9]+)\''
]
"""
http://www.tudou.com/programs/view/html5embed.action?type=0&code=3LS_URGvl54&lcode=&resourceId=0_06_05_99
"""
tudou_embed_patterns = [ 'tudou\.com[a-zA-Z0-9\/\?=\&\.\;]+code=([a-zA-Z0-9_]+)\&',
'www\.tudou\.com/v/([a-zA-Z0-9_-]+)/[^"]*v\.swf'
]
"""
refer to http://open.tudou.com/wiki/video/info
"""
tudou_api_patterns = [ ]
yinyuetai_embed_patterns = [ 'player\.yinyuetai\.com/video/swf/(\d+)' ]
iqiyi_embed_patterns = [ 'player\.video\.qiyi\.com/([^/]+)/[^/]+/[^/]+/[^/]+\.swf[^"]+tvId=(\d+)' ]
netease_embed_patterns = [ '(http://\w+\.163\.com/movie/[^\'"]+)' ]
vimeo_embed_patters = [ 'player\.vimeo\.com/video/(\d+)' ]
def embed_download(url, output_dir = '.', merge = True, info_only = False ,**kwargs):
    """Find players embedded in the page at *url* (Youku, Tudou, Yinyuetai,
    iqiyi, NetEase, Vimeo) and hand each match to its site extractor.

    Raises NotImplementedError if no supported embed is present.
    """
    page = get_content(url, headers=fake_headers)
    title = match1(page, '<title>([^<>]+)</title>')
    found = False

    for vid in set(matchall(page, youku_embed_patterns)):
        found = True
        youku_download_by_vid(vid, title=title, output_dir=output_dir,
                              merge=merge, info_only=info_only)
    for vid in set(matchall(page, tudou_embed_patterns)):
        found = True
        tudou_download_by_id(vid, title=title, output_dir=output_dir,
                             merge=merge, info_only=info_only)
    for vid in matchall(page, yinyuetai_embed_patterns):
        found = True
        yinyuetai_download_by_id(vid, title=title, output_dir=output_dir,
                                 merge=merge, info_only=info_only)
    for vid in matchall(page, iqiyi_embed_patterns):
        found = True
        # iqiyi captures (player-id, tvId); the extractor expects (tvId, player-id).
        iqiyi_download_by_vid((vid[1], vid[0]), title=title, output_dir=output_dir,
                              merge=merge, info_only=info_only)
    for movie_url in matchall(page, netease_embed_patterns):
        found = True
        netease_download(movie_url, title=title, output_dir=output_dir,
                         merge=merge, info_only=info_only)
    for vimeo_id in matchall(page, vimeo_embed_patters):
        found = True
        vimeo_download_by_id(vimeo_id, title=title, output_dir=output_dir,
                             merge=merge, info_only=info_only)

    if not found:
        raise NotImplementedError(url)
raise NotImplementedError(url)
site_info = "any.any"
download = embed_download
download_playlist = playlist_not_supported('any.any')
| {
"repo_name": "betaY/crawler",
"path": "you-get-master/src/you_get/extractors/embed.py",
"copies": "2",
"size": "3172",
"license": "mit",
"hash": 6218078539053646000,
"line_mean": 35.8837209302,
"line_max": 117,
"alpha_frac": 0.6138083228,
"autogenerated": false,
"ratio": 2.8145519077196095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.442836023051961,
"avg_score": null,
"num_lines": null
} |
__all__ = ['embed_download']
from ..common import *
from .iqiyi import iqiyi_download_by_vid
from .le import letvcloud_download_by_vu
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_id
from .yinyuetai import yinyuetai_download_by_id
from .youku import youku_download_by_vid
"""
refer to http://open.youku.com/tools
"""
youku_embed_patterns = [ 'youku\.com/v_show/id_([a-zA-Z0-9=]+)',
'player\.youku\.com/player\.php/sid/([a-zA-Z0-9=]+)/v\.swf',
'loader\.swf\?VideoIDS=([a-zA-Z0-9=]+)',
'player\.youku\.com/embed/([a-zA-Z0-9=]+)',
'YKU.Player\(\'[a-zA-Z0-9]+\',{ client_id: \'[a-zA-Z0-9]+\', vid: \'([a-zA-Z0-9]+)\''
]
"""
http://www.tudou.com/programs/view/html5embed.action?type=0&code=3LS_URGvl54&lcode=&resourceId=0_06_05_99
"""
tudou_embed_patterns = [ 'tudou\.com[a-zA-Z0-9\/\?=\&\.\;]+code=([a-zA-Z0-9_]+)\&',
'www\.tudou\.com/v/([a-zA-Z0-9_-]+)/[^"]*v\.swf'
]
"""
refer to http://open.tudou.com/wiki/video/info
"""
tudou_api_patterns = [ ]
yinyuetai_embed_patterns = [ 'player\.yinyuetai\.com/video/swf/(\d+)' ]
iqiyi_embed_patterns = [ 'player\.video\.qiyi\.com/([^/]+)/[^/]+/[^/]+/[^/]+\.swf[^"]+tvId=(\d+)' ]
def embed_download(url, output_dir = '.', merge = True, info_only = False ,**kwargs):
    """Detect Youku / Tudou / Yinyuetai / iqiyi players embedded in *url*
    and download each one; raise NotImplementedError when nothing matches."""
    page = get_content(url)
    title = match1(page, '<title>([^<>]+)</title>')
    found = False
    for youku_id in set(matchall(page, youku_embed_patterns)):
        found = True
        youku_download_by_vid(youku_id, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
    for tudou_id in set(matchall(page, tudou_embed_patterns)):
        found = True
        tudou_download_by_id(tudou_id, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
    for mv_id in matchall(page, yinyuetai_embed_patterns):
        found = True
        yinyuetai_download_by_id(mv_id, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
    for hit in matchall(page, iqiyi_embed_patterns):
        found = True
        # Pattern captures (player-id, tvId); the extractor wants (tvId, player-id).
        iqiyi_download_by_vid((hit[1], hit[0]), title=title, output_dir=output_dir, merge=merge, info_only=info_only)
    if not found:
        raise NotImplementedError(url)
site_info = "any.any"
download = embed_download
download_playlist = playlist_not_supported('any.any')
| {
"repo_name": "linhua55/you-get",
"path": "src/you_get/extractors/embed.py",
"copies": "1",
"size": "2548",
"license": "mit",
"hash": 213281935754828260,
"line_mean": 36.4705882353,
"line_max": 117,
"alpha_frac": 0.6000784929,
"autogenerated": false,
"ratio": 2.787746170678337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3887824663578337,
"avg_score": null,
"num_lines": null
} |
__all__ = ['embed_download']
from ..common import *
from .iqiyi import iqiyi_download_by_vid
from .letv import letvcloud_download_by_vu
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_id
from .yinyuetai import yinyuetai_download_by_id
from .youku import youku_download_by_vid
"""
refer to http://open.youku.com/tools
"""
youku_embed_patterns = [ 'youku\.com/v_show/id_([a-zA-Z0-9=]+)',
'player\.youku\.com/player\.php/sid/([a-zA-Z0-9=]+)/v\.swf',
'loader\.swf\?VideoIDS=([a-zA-Z0-9=]+)',
'player\.youku\.com/embed/([a-zA-Z0-9=]+)',
'YKU.Player\(\'[a-zA-Z0-9]+\',{ client_id: \'[a-zA-Z0-9]+\', vid: \'([a-zA-Z0-9]+)\''
]
"""
http://www.tudou.com/programs/view/html5embed.action?type=0&code=3LS_URGvl54&lcode=&resourceId=0_06_05_99
"""
# NOTE: fixed a stray '[' inside both capture classes ('([[a-zA-Z0-9_]+)'
# also matched a literal '['), and allow '-' since Tudou video codes may
# contain hyphens -- matching the corrected patterns used upstream.
tudou_embed_patterns = [ 'tudou\.com[a-zA-Z0-9\/\?=\&\.\;]+code=([a-zA-Z0-9_-]+)\&',
                         'www\.tudou\.com/v/([a-zA-Z0-9_-]+)/[^"]*v\.swf'
                       ]
"""
refer to http://open.tudou.com/wiki/video/info
"""
tudou_api_patterns = [ ]
yinyuetai_embed_patterns = [ 'player\.yinyuetai\.com/video/swf/(\d+)' ]
iqiyi_embed_patterns = [ 'player\.video\.qiyi\.com/([^/]+)/[^/]+/[^/]+/[^/]+\.swf[^"]+tvId=(\d+)' ]
def embed_download(url, output_dir = '.', merge = True, info_only = False ,**kwargs):
    """Dispatch embedded Youku / Tudou / Yinyuetai / iqiyi players found on
    the page at *url* to their site extractors.

    Raises NotImplementedError when the page contains no known player.
    """
    page = get_content(url)
    title = match1(page, '<title>([^<>]+)</title>')

    # Collect every match up front; matchall is pure, so this is equivalent
    # to matching lazily between downloads.
    youku_ids = set(matchall(page, youku_embed_patterns))
    tudou_ids = set(matchall(page, tudou_embed_patterns))
    yinyuetai_ids = matchall(page, yinyuetai_embed_patterns)
    iqiyi_ids = matchall(page, iqiyi_embed_patterns)

    for vid in youku_ids:
        youku_download_by_vid(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
    for vid in tudou_ids:
        tudou_download_by_id(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
    for vid in yinyuetai_ids:
        yinyuetai_download_by_id(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
    for vid in iqiyi_ids:
        # iqiyi captures come back as (player-id, tvId); swap for the extractor.
        iqiyi_download_by_vid((vid[1], vid[0]), title=title, output_dir=output_dir, merge=merge, info_only=info_only)

    if not (youku_ids or tudou_ids or yinyuetai_ids or iqiyi_ids):
        raise NotImplementedError(url)
site_info = "any.any"
download = embed_download
download_playlist = playlist_not_supported('any.any')
| {
"repo_name": "jindaxia/you-get",
"path": "src/you_get/extractors/embed.py",
"copies": "1",
"size": "2551",
"license": "mit",
"hash": -8034753737348402000,
"line_mean": 36.5147058824,
"line_max": 117,
"alpha_frac": 0.6001568013,
"autogenerated": false,
"ratio": 2.787978142076503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3888134943376503,
"avg_score": null,
"num_lines": null
} |
__all__ = ['embed_download']
from ..common import *
from .letv import letvcloud_download_by_vu
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_id
from .youku import youku_download_by_vid
"""
refer to http://open.youku.com/tools
"""
youku_embed_patterns = [ 'youku\.com/v_show/id_([a-zA-Z0-9=]+)',
'player\.youku\.com/player\.php/sid/([a-zA-Z0-9=]+)/v\.swf',
'loader\.swf\?VideoIDS=([a-zA-Z0-9=]+)',
'player\.youku\.com/embed/([a-zA-Z0-9=]+)',
'YKU.Player\(\'[a-zA-Z0-9]+\',{ client_id: \'[a-zA-Z0-9]+\', vid: \'([a-zA-Z0-9]+)\''
]
"""
http://www.tudou.com/programs/view/html5embed.action?type=0&code=3LS_URGvl54&lcode=&resourceId=0_06_05_99
"""
# NOTE: fixed a stray '[' inside the capture class ('([[a-zA-Z0-9_]+)' also
# matched a literal '['), and allow '-' since Tudou video codes may contain
# hyphens -- matching the corrected pattern used upstream.
tudou_embed_patterns = [ 'tudou\.com[a-zA-Z0-9\/\?=\&\.\;]+code=([a-zA-Z0-9_-]+)\&'
                       ]
"""
refer to http://open.tudou.com/wiki/video/info
"""
tudou_api_patterns = [ ]
def embed_download(url, output_dir = '.', merge = True, info_only = False ,**kwargs):
    """Look for Youku / Tudou players embedded in the page at *url* and
    download every video found; raise NotImplementedError when none matches.

    Matches are intentionally NOT de-duplicated here (unlike later versions),
    so a video embedded twice is downloaded twice.
    """
    page = get_content(url)
    title = match1(page, '<title>([^<>]+)</title>')
    found = False
    for youku_id in matchall(page, youku_embed_patterns):
        found = True
        youku_download_by_vid(youku_id, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
    for tudou_id in matchall(page, tudou_embed_patterns):
        found = True
        tudou_download_by_id(tudou_id, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
    if not found:
        raise NotImplementedError(url)
site_info = "any.any"
download = embed_download
download_playlist = playlist_not_supported('any.any')
| {
"repo_name": "specter4mjy/you-get",
"path": "src/you_get/extractors/embed.py",
"copies": "1",
"size": "1785",
"license": "mit",
"hash": -5738698972797349000,
"line_mean": 34,
"line_max": 117,
"alpha_frac": 0.5932773109,
"autogenerated": false,
"ratio": 2.811023622047244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3904300932947244,
"avg_score": null,
"num_lines": null
} |
__all__ = ['embed_download']
import urllib.parse
from ..common import *
from .bilibili import bilibili_download
from .dailymotion import dailymotion_download
from .iqiyi import iqiyi_download_by_vid
from .le import letvcloud_download_by_vu
from .netease import netease_download
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_id
from .vimeo import vimeo_download_by_id
from .yinyuetai import yinyuetai_download_by_id
from .youku import youku_download_by_vid
from . import iqiyi
from . import bokecc
"""
refer to http://open.youku.com/tools
"""
youku_embed_patterns = [ 'youku\.com/v_show/id_([a-zA-Z0-9=]+)',
'player\.youku\.com/player\.php/sid/([a-zA-Z0-9=]+)/v\.swf',
'loader\.swf\?VideoIDS=([a-zA-Z0-9=]+)',
'player\.youku\.com/embed/([a-zA-Z0-9=]+)',
'YKU.Player\(\'[a-zA-Z0-9]+\',{ client_id: \'[a-zA-Z0-9]+\', vid: \'([a-zA-Z0-9]+)\''
]
"""
http://www.tudou.com/programs/view/html5embed.action?type=0&code=3LS_URGvl54&lcode=&resourceId=0_06_05_99
"""
tudou_embed_patterns = [ 'tudou\.com[a-zA-Z0-9\/\?=\&\.\;]+code=([a-zA-Z0-9_-]+)\&',
'www\.tudou\.com/v/([a-zA-Z0-9_-]+)/[^"]*v\.swf'
]
"""
refer to http://open.tudou.com/wiki/video/info
"""
tudou_api_patterns = [ ]
yinyuetai_embed_patterns = [ 'player\.yinyuetai\.com/video/swf/(\d+)' ]
iqiyi_embed_patterns = [ 'player\.video\.qiyi\.com/([^/]+)/[^/]+/[^/]+/[^/]+\.swf[^"]+tvId=(\d+)' ]
netease_embed_patterns = [ '(http://\w+\.163\.com/movie/[^\'"]+)' ]
vimeo_embed_patters = [ 'player\.vimeo\.com/video/(\d+)' ]
dailymotion_embed_patterns = [ 'www\.dailymotion\.com/embed/video/(\w+)' ]
"""
check the share button on http://www.bilibili.com/video/av5079467/
"""
bilibili_embed_patterns = [ 'static\.hdslb\.com/miniloader\.swf.*aid=(\d+)' ]
'''
http://open.iqiyi.com/lib/player.html
'''
iqiyi_patterns = [r'(?:\"|\')(https?://dispatcher\.video\.qiyi\.com\/disp\/shareplayer\.swf\?.+?)(?:\"|\')',
r'(?:\"|\')(https?://open\.iqiyi\.com\/developer\/player_js\/coopPlayerIndex\.html\?.+?)(?:\"|\')']
bokecc_patterns = [r'bokecc\.com/flash/pocle/player\.swf\?siteid=(.+?)&vid=(.{32})']
recur_limit = 3
def embed_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Scan *url* for embedded players of known sites and download them.

    Tries every known embed pattern (Youku, Tudou, YinYueTai, iQiyi,
    NetEase, Vimeo, Dailymotion, bilibili, bokecc); if none matches, it
    recurses into the page's <iframe>s up to ``recur_limit`` levels.

    Returns True once anything was downloaded.  Raises NotImplementedError
    when nothing supported is found at the top level; recursive calls
    return False instead of raising.
    """
    content = get_content(url, headers=fake_headers)
    found = False
    title = match1(content, '<title>([^<>]+)</title>')

    # set() de-duplicates IDs that appear several times on the same page.
    vids = matchall(content, youku_embed_patterns)
    for vid in set(vids):
        found = True
        youku_download_by_vid(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only)

    vids = matchall(content, tudou_embed_patterns)
    for vid in set(vids):
        found = True
        tudou_download_by_id(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only)

    vids = matchall(content, yinyuetai_embed_patterns)
    for vid in vids:
        found = True
        yinyuetai_download_by_id(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only)

    vids = matchall(content, iqiyi_embed_patterns)
    for vid in vids:
        found = True
        # The pattern captures (player path, tvId); the downloader wants
        # them in (tvId, path) order.
        iqiyi_download_by_vid((vid[1], vid[0]), title=title, output_dir=output_dir, merge=merge, info_only=info_only)

    urls = matchall(content, netease_embed_patterns)
    for url in urls:
        found = True
        netease_download(url, output_dir=output_dir, merge=merge, info_only=info_only)

    urls = matchall(content, vimeo_embed_patters)
    for url in urls:
        found = True
        vimeo_download_by_id(url, title=title, output_dir=output_dir, merge=merge, info_only=info_only, referer=url)

    urls = matchall(content, dailymotion_embed_patterns)
    for url in urls:
        found = True
        dailymotion_download(url, output_dir=output_dir, merge=merge, info_only=info_only)

    aids = matchall(content, bilibili_embed_patterns)
    for aid in aids:
        found = True
        url = 'http://www.bilibili.com/video/av%s/' % aid
        bilibili_download(url, output_dir=output_dir, merge=merge, info_only=info_only)

    iqiyi_urls = matchall(content, iqiyi_patterns)
    for url in iqiyi_urls:
        found = True
        iqiyi.download(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)

    bokecc_metas = matchall(content, bokecc_patterns)
    for meta in bokecc_metas:
        found = True
        bokecc.bokecc_download_by_id(meta[1], output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)

    if found:
        return True

    # Try harder: recurse into all iframes, up to recur_limit levels deep.
    # Remember whether this is a recursive call *before* popping the marker.
    is_recursive_call = 'recur_lv' in kwargs
    if kwargs.get('recur_lv', 0) < recur_limit:
        # BUGFIX: 'recur_lv' must be removed from kwargs before recursing.
        # Passing it both explicitly and via **kwargs raised
        # "TypeError: got multiple values for keyword argument 'recur_lv'"
        # from the second recursion level onwards.
        r = kwargs.pop('recur_lv', 0) + 1
        iframes = matchall(content, [r'<iframe.+?src=(?:\"|\')(.+?)(?:\"|\')'])
        for iframe in iframes:
            if not iframe.startswith('http'):
                src = urllib.parse.urljoin(url, iframe)
            else:
                src = iframe
            found = embed_download(src, output_dir=output_dir, merge=merge, info_only=info_only, recur_lv=r, **kwargs)
            if found:
                return True

    if not found and not is_recursive_call:
        raise NotImplementedError(url)
    else:
        return found
# Extractor metadata consumed by you-get's dispatch machinery.
site_info = "any.any"
download = embed_download
download_playlist = playlist_not_supported('any.any')
| {
"repo_name": "zmwangx/you-get",
"path": "src/you_get/extractors/embed.py",
"copies": "2",
"size": "5515",
"license": "mit",
"hash": -2943652645683742700,
"line_mean": 35.045751634,
"line_max": 118,
"alpha_frac": 0.6103354488,
"autogenerated": false,
"ratio": 2.9571045576407506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9529187945897051,
"avg_score": 0.007650412108740095,
"num_lines": 153
} |
__all__ = ["Encoder"]
import re
import struct
from array import array
import pyconspack.header as H
import pyconspack.types as T
import pyconspack.error as E
import pyconspack.index as I
class EncodedObject:
    """Pairs a type designator with the converted payload of a Python
    object that has no native conspack representation; the Encoder writes
    instances out via ``encode_tmap`` (typed map)."""
    def __init__(self, typename, val):
        # typename: symbol naming the encoded type; val: the mapped payload.
        self.typename = typename
        self.val = val
class Encoder:
    """Serializes Python values into the conspack binary format.

    Behaviour is tuned with keyword options (queried via ``_opt``), e.g.
    ``index``, ``norefs``, ``lists_are_vectors``, ``single_char_strings``,
    ``all_floats_single``, ``no_sub_underscores`` and ``encoders`` (a
    per-instance ``{type: (typename, func)}`` table).  Encoded bytes
    accumulate in ``self.bytes``.

    NOTE(review): the original indentation of this file was lost in
    transit and is reconstructed here; ambiguous spots are flagged inline.
    """
    def __init__(self, **kw):
        self.opt = kw
        self.bytes = bytearray()   # output buffer
        self.refs = set()          # object ids seen once during _notice
        self.tmaps = dict()        # id -> memoized EncodedObject
        self.tags = dict()         # id -> tag number for shared objects
        self.written = set()       # ids already written (later uses emit refs)
        self.tag = 0               # next tag number to hand out
        self.index = self._opt('index')
        if(self.index):
            self.index = I.Index(self.index)

    def encode(self, val):
        """Encode val, scanning it for shared structure first."""
        self._notice(val)
        return self._encode(val)

    def _encode(self, val, fixed=False):
        # Shared objects: emit a ref once written, a tag the first time.
        if(id(val) in self.tags and id(val) in self.written):
            return self.encode_ref(self.tags[id(val)], fixed)
        elif(id(val) in self.tags):
            self.encode_tag(self.tags[id(val)])
        if(val.__class__ in self.encoders):
            encoder = self.encoders[val.__class__]
        else:
            # Unknown type: translate via a registered object encoder
            # and encode the resulting EncodedObject instead.
            newval = self.encode_object(val)
            return self._encode(newval, fixed)
        self.written.add(id(val))
        # ``encoders`` holds plain functions, so self is passed explicitly.
        encoder(self, val, fixed)

    def _write(self, b):
        # Append one byte (int) or a byte sequence to the output buffer.
        bytes = self.bytes
        if(type(b) is int):
            bytes.append(b)
        else:
            bytes[len(bytes):len(bytes)] = b

    def _opt(self, name):
        # Option lookup; False when the option is absent.
        return (name in self.opt) and (self.opt[name])

    def _refable(self, val):
        # Immediate values (ints, floats, single-char strings unless the
        # 'single_char_strings' option is on) never become refs.
        return \
            not (type(val) is int or
                 type(val) is float or
                 (type(val) is str and len(val) == 1 and
                  not self._opt('single_char_strings')))

    def encode_bool(self, val, fixed=False):
        # Booleans (and None) are a single byte: 1 or 0.
        if(val):
            self._write(1)
        else:
            self._write(0)

    def encode_int(self, val, fixed=False):
        if(fixed is False):
            code, fmt = H.guess_int(val)
            self._write(H.NUMBER | code)
        else:
            code, fmt = H.fixed_type_fmt(fixed)
            # NOTE(review): this line's original indentation was lost;
            # it is reconstructed inside the else-branch -- verify
            # against upstream pyconspack.
            fmt = '>' + fmt
        if fmt:
            self._write(struct.pack(fmt, val))
        elif(code == H.INT128 or code == H.UINT128):
            # 128-bit ints have no struct code; emit big-endian by hand,
            # two's-complementing negatives first.
            if(val < 0):
                val += 2**128
            for i in reversed(range(16)):
                self._write((val >> (i * 8)) & 0xFF)

    def encode_double(self, val, fixed=False):
        # Optionally degrade to single precision.
        if(self._opt('all_floats_single') or
           (fixed is not False and ((fixed & H.NUMBER_TYPE_MASK) == H.SINGLE_FLOAT))):
            return self.encode_float(val, fixed)
        if(not fixed):
            self._write(H.NUMBER | H.DOUBLE_FLOAT)
        self._write(struct.pack('>d', val))

    def encode_float(self, val, fixed=False):
        if(not fixed):
            self._write(H.NUMBER | H.SINGLE_FLOAT)
        self._write(struct.pack('>f', val))

    def encode_string(self, val, fixed=False):
        # Single characters encode as CHARACTER unless configured otherwise.
        if(not self._opt('single_char_strings') and len(val) == 1):
            return self.encode_char(val)
        data = val.encode(encoding='utf-8', errors='strict')
        size_bytes, fmt = H.size_bytes(len(data))
        if(not fixed):
            self._write(H.STRING | size_bytes)
        self._write(struct.pack(fmt, len(data)))
        self._write(data)

    def encode_char(self, val, fixed=False):
        if(len(val) > 1):
            # NOTE(review): "{s}" is never interpolated here -- probably
            # meant "...".format(s=val); verify against E.BadValue.
            raise E.BadValue("{s} is not a character", val)
        data = val.encode(encoding='utf-8', errors='strict')
        if(not fixed):
            self._write(H.CHARACTER | len(data))
        self._write(data)

    def encode_list(self, val, fixed=False):
        if(self._opt('lists_are_vectors') and type(val) is list):
            return self.encode_vector(val)
        l = len(val)
        if(l == 0):
            return self.encode_bool(None)
        # Pairs (dotted) and singletons encode as a cons cell.
        if((l == 2 and type(val) is T.DottedList) or (l == 1)):
            return self.encode_cons(val, fixed)
        # Proper lists carry an extra terminating NIL element.
        if(type(val) is not T.DottedList):
            l += 1
        size_bytes, fmt = H.size_bytes(l)
        if(not fixed):
            self._write(H.CONTAINER | H.CONTAINER_LIST | size_bytes)
        self._write(struct.pack(fmt, l))
        for item in val:
            self._encode(item)
        if(type(val) is not T.DottedList):
            self._encode(None)

    def encode_vector(self, val, fixed=False):
        l = len(val)
        size_bytes, fmt = H.size_bytes(l)
        if(not fixed):
            self._write(H.CONTAINER | H.CONTAINER_VECTOR | size_bytes)
        self._write(struct.pack(fmt, l))
        for item in val:
            self._encode(item)

    def encode_fixed_vector(self, val, fixed=False):
        # Homogeneous vectors carry one element type for the whole body.
        fixed_type = H.fixed_type(val, force_floats=self._opt('all_floats_single'))
        if(fixed_type is None):
            return self.encode_vector(val)
        l = len(val)
        size_bytes, fmt = H.size_bytes(l)
        if(not fixed):
            self._write(H.CONTAINER | H.CONTAINER_VECTOR | H.CONTAINER_FIXED |
                        size_bytes)
        self._write(struct.pack(fmt, l))
        self._write(fixed_type)
        for i in val:
            self._encode(i, fixed_type)

    def encode_map(self, val, fixed=False):
        self.encode_map_values(val, ((not fixed) and H.CONTAINER_MAP))

    def encode_object(self, val):
        # Translate an arbitrary object via its registered (typename,
        # func) encoder into a memoized EncodedObject.
        if(id(val) in self.tmaps):
            return self.tmaps[id(val)]
        encoders = self._opt('encoders')
        if(not encoders or type(val) not in encoders):
            if(type(val) in Encoder.class_encoders):
                encoders = Encoder.class_encoders
            else:
                raise E.NoEncoder("Encoder for {v} (type {t}) not found".format(v=val, t=type(val)))
        typename, func = encoders[type(val)]
        encoded = EncodedObject(typename, func(val))
        self.tmaps[id(val)] = encoded
        self._notice(typename)
        self._notice(encoded)
        return encoded

    def dict_to_alist(self, d):
        # Convert a dict into a list of cons cells (an alist).
        return [T.Cons((k, v)) for (k,v) in d.items()]

    def encode_tmap(self, val, fixed=False):
        self.encode_map_values(val.val, ((not fixed) and H.CONTAINER_TMAP),
                               is_tmap=True, type_ob=val.typename)

    def encode_map_values(self, val, header=False, is_tmap=False,
                          type_ob=None):
        keys = val.keys()
        l = len(keys)
        size_bytes, fmt = H.size_bytes(l)
        if(header is not False):
            self._write(H.CONTAINER | header | size_bytes)
        self._write(struct.pack(fmt, l))
        if(is_tmap):
            self._encode(type_ob)
        for k in keys:
            # Key: string keys become symbols/keywords, with underscores
            # rewritten to hyphens (Lisp convention) unless disabled.
            if(isinstance(k, str)):
                new_k = k
                if(not self._opt('no_sub_underscores') and k[0] != '_'):
                    new_k = re.sub(r'_', r'-', k)
                if(type_ob):
                    self._encode(T.intern(new_k, type_ob.package))
                else:
                    self._encode(T.keyword(new_k))
            else:
                self._encode(k)
            # Value
            self._encode(val[k])

    def encode_package(self, val, fixed=False):
        if(not fixed):
            self._write(H.PACKAGE)
        self._encode(val.name)

    def encode_symbol(self, val, fixed=False):
        # Indexed symbols compress to their pre-agreed index number.
        if(self.index and val in self.index):
            return self.encode_index(self.index[val])
        if(val.package is T.package('keyword')):
            return self.encode_keyword(val, fixed)
        if(not fixed):
            self._write(H.SYMBOL)
        self._encode(val.name)
        self._encode(val.package)

    def encode_keyword(self, val, fixed=False):
        if(not fixed):
            self._write(H.SYMBOL | H.SYMBOL_KEYWORD)
        self._encode(val.name)

    def encode_pointer(self, val, fixed=False):
        size_bytes, fmt = H.size_bytes(val)
        if(not fixed):
            self._write(H.POINTER | size_bytes)
        self._write(struct.pack(fmt, val))

    def encode_cons(self, val, fixed=False):
        # Missing car/cdr positions encode as NIL.
        l = len(val)
        if(not fixed):
            self._write(H.CONS)
        if(l > 0): self._encode(val[0])
        else: self._encode(None)
        if(l > 1): self._encode(val[1])
        else: self._encode(None)

    def encode_rref(self, val, fixed=False):
        if(not fixed):
            self._write(H.REMOTE_REF)
        self._encode(val.value)

    def encode_index(self, val, fixed=False):
        if(val < 0):
            raise E.OutOfBounds("Invalid index {n}, index values must be positive".format(n=val))
        if(not fixed):
            # Small values fit inline in the header byte.
            if(val < 16):
                return self._write(H.INDEX | H.REFTAG_INLINE | val)
        # NOTE(review): reconstructed indentation -- the fixed=True path
        # is assumed to still pack the value; verify against upstream.
        size_bytes, fmt = H.size_bytes(val)
        self._write(H.INDEX | size_bytes)
        self._write(struct.pack(fmt, val))

    def encode_tag(self, val, fixed=False):
        if(not fixed):
            if(val < 16):
                return self._write(H.TAG | H.REFTAG_INLINE | val)
        size_bytes, fmt = H.size_bytes(val)
        self._write(H.TAG | size_bytes)
        self._write(struct.pack(fmt, val))

    def encode_ref(self, val, fixed=False):
        if(not fixed):
            if(val < 16):
                return self._write(H.REF | H.REFTAG_INLINE | val)
        size_bytes, fmt = H.size_bytes(val)
        self._write(H.REF | size_bytes)
        self._write(struct.pack(fmt, val))

    def _notice(self, val):
        # Pre-scan pass: record objects seen more than once so they get
        # tag/ref treatment, recursing into containers.
        if(self._opt('norefs')):
            return
        if(not self._refable(val)):
            return
        if(id(val) in self.refs):
            # Second sighting: assign a tag number once.
            if(not id(val) in self.tags):
                self.tags[id(val)] = self.tag
                self.tag += 1
            return
        else:
            self.refs.add(id(val))
        if(isinstance(val, list) or
           isinstance(val, tuple)):
            for i in val:
                self._notice(i)
        elif(isinstance(val, dict)):
            for i in val:
                # String keys become fresh symbols later, so only
                # non-string keys can be shared.
                if(type(i) is not str):
                    self._notice(i)
                self._notice(val[i])
        elif(isinstance(val, T.Symbol)):
            self._notice(val.name)
            self._notice(val.package)
        elif(isinstance(val, EncodedObject)):
            self._notice(val.val)
        elif(val.__class__ not in self.encoders):
            # Unknown types are converted now so their parts get noticed.
            self.encode_object(val)

    # Global {type: (typename, func)} table consulted when no
    # per-instance 'encoders' option matches.
    class_encoders = dict()

    def register(c, symbol, func):
        # Plain function (no self); call as Encoder.register(cls, sym, fn).
        Encoder.class_encoders[c] = (symbol, func)

    def deregister(c):
        # Plain function (no self); call as Encoder.deregister(cls).
        del Encoder.class_encoders[c]

    # Dispatch table mapping concrete Python types to the encoding
    # functions above (plain functions at class-creation time, hence the
    # explicit self in _encode's call).
    encoders = {
        bool: encode_bool,
        type(None): encode_bool,
        int: encode_int,
        T.SingleFloat: encode_float,
        float: encode_double,
        str: encode_string,
        T.Char: encode_char,
        tuple: encode_list,
        list: encode_list,
        T.Cons: encode_cons,
        T.DottedList: encode_list,
        array: encode_fixed_vector,
        bytes: encode_fixed_vector,
        bytearray: encode_fixed_vector,
        T.Vector: encode_vector,
        dict: encode_map,
        T.Package: encode_package,
        T.Symbol: encode_symbol,
        T.Pointer: encode_pointer,
        EncodedObject: encode_tmap,
        T.RRef: encode_rref,
        T.Index: encode_index,
    }
| {
"repo_name": "conspack/pyconspack",
"path": "encode.py",
"copies": "1",
"size": "11267",
"license": "bsd-2-clause",
"hash": 4471520132313202700,
"line_mean": 29.2876344086,
"line_max": 100,
"alpha_frac": 0.5277358658,
"autogenerated": false,
"ratio": 3.544196288140925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4571932153940925,
"avg_score": null,
"num_lines": null
} |
"""All encounter-related routes.
If the user wants to browse/search encounters, or look at the overview for a
particular encounter, they'll probably want one of these routes.
"""
from bottle import route, template, request, HTTPError
from db import Encounter, Combatant, Swing, DataRequest, getDataTable, crosstab
from datetime import datetime, timedelta
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.dialects import postgresql
from sqlalchemy import func, BigInteger, Column, TIMESTAMP
__author__ = "David Bliss"
__copyright__ = "Copyright (C) 2017 David Bliss"
__license__ = "Apache-2.0"
__version__ = "1.1"
__all__ = ('listEncounters', 'encounterInfo', 'encounterCombatantPerformance',
'encounterDamageTypeInfo', 'encounterAttackTypeInfo',
'queryEncounterTable')
@route('/encounter')
def listEncounters(dbSession):
    """Render the encounter list page (most recent 25 within ~33 days)."""
    # Server-side fallback; JS clients go through the data route instead.
    cutoff = datetime.now() - timedelta(days=33)
    query = dbSession.query(Encounter)
    query = query.filter(Encounter.endtime > cutoff)
    query = query.order_by(Encounter.endtime.desc())
    recent = query.limit(25).all()
    return template('encounterList', encounters=recent)
@route('/encounter/<encounterId>')
@route('/encounter/<encounterId>/')
def encounterInfo(encounterId, dbSession):
    """Provide an overview of a given encounter.

    Renders the encounter detail page with its allied and enemy
    combatants plus the top-10 hits and heals on each side.
    Raises HTTPError(404) for malformed or unknown encounter ids.
    """
    # Reject anything non-alphanumeric before it reaches the query layer.
    if (not encounterId.isalnum()):
        raise HTTPError(404)
    try:
        enc = dbSession.query(Encounter).filter(
            Encounter.encid == encounterId
        ).one()
    except NoResultFound:
        raise HTTPError(404)
    except MultipleResultsFound:
        # Duplicate encids indicate corrupt data; treat as not found.
        raise HTTPError(404)
    # Combatant.ally is stored as 'T'/'F' flags.
    allies = dbSession.query(Combatant).filter(
        Combatant.encid == encounterId,
        Combatant.ally == 'T'
    ).all()
    foes = dbSession.query(Combatant).filter(
        Combatant.encid == encounterId,
        Combatant.ally == 'F'
    ).all()
    # swingtype 1 = attack, 3 = heal (per the filters below).
    topAlliedAttacks = dbSession.query(Swing).filter(
        Swing.encid == encounterId,
        Swing.swingtype == 1,
        Swing.attackerName.in_(a.name for a in allies)
    ).order_by(Swing.damage.desc()).limit(10)
    # NOTE(review): debug output left in -- prints the compiled SQL on
    # every request; consider routing through debugQuery/logging.
    print(
        str(topAlliedAttacks.statement.compile(dialect=postgresql.dialect()))
    )
    topAlliedAttacks = topAlliedAttacks.all()
    topAlliedHeals = dbSession.query(Swing).filter(
        Swing.encid == encounterId,
        Swing.swingtype == 3,
        Swing.attackerName.in_(a.name for a in allies)
    ).order_by(Swing.damage.desc()).limit(10).all()
    topEnemyAttacks = dbSession.query(Swing).filter(
        Swing.encid == encounterId,
        Swing.swingtype == 1,
        Swing.attackerName.in_(a.name for a in foes)
    ).order_by(Swing.damage.desc()).limit(10).all()
    topEnemyHeals = dbSession.query(Swing).filter(
        Swing.encid == encounterId,
        Swing.swingtype == 3,
        Swing.attackerName.in_(a.name for a in foes)
    ).order_by(Swing.damage.desc()).limit(10).all()
    return template('encounterDetail', encounter=enc, allies=allies, foes=foes,
                    alliedHits=topAlliedAttacks, alliedHeals=topAlliedHeals,
                    foeHits=topEnemyAttacks, foeHeals=topEnemyHeals)
@route('/encounter/<encounterId>/c/<combatantName>')
def encounterCombatantPerformance(encounterId, combatantName, dbSession):
    """For a given encounter, how did a particular character do in detail.

    NOTE(review): stub -- encounterId/combatantName are currently unused
    and the template is rendered without any context data.
    """
    return template('encounterCombatant')
@route('/encounter/<encounterId>/d/<damageTypeId>')
def encounterDamageTypeInfo(encounterId, damageTypeId, dbSession):
    """For a given encounter, look at a particular group of abilities.

    NOTE(review): unimplemented stub; currently returns None.
    """
    pass
@route('/encounter/<encounterId>/a/<attackTypeId>')
def encounterAttackTypeInfo(encounterId, attackTypeId, dbSession):
    """For an encounter, look at how a particular ability performed.

    NOTE(review): stub -- the parameters are unused and the template is
    rendered without context data.
    """
    return template('encounterAttackType')
@route('/data/encounter')
@route('/data/encounter', method="POST")
def queryEncounterTable(dbSession):
    """Return JSON describing the results of an arbitrary query/search
    against the Encounter table, as posted in the request body."""
    req = DataRequest(Encounter, request.json)
    table = getDataTable(req, dbSession)
    return table
def debugQuery(q):
    """Quickly print a compiled query for debugging."""
    # NOTE(review): expects an object exposing .compile() (a Core
    # statement); for an ORM Query, pass q.statement -- encounterInfo
    # compiles q.statement rather than q itself.
    print(q.compile(dialect=postgresql.dialect()))
@route('/encounter/<encounterId>/graph')
def encounterGraph(encounterId, dbSession):
    """Stream a CSV timeline of per-combatant damage/heal totals.

    Generator route: yields a header row of combatant names, then one
    CSV row per swing timestamp (missing values rendered as NaN), built
    from a crosstab pivot of the swing data.
    """
    # WARNING: VALIDATION PREVENTING A SQL INJECTION
    if not encounterId.isalnum():
        raise HTTPError(404)
    allied = request.params['allied'] in (True, 'True', 'true', 't', 1, 'T')
    attackTypeId = int(request.params['attackTypeId'])
    # Combatant.ally is stored as a 'T'/'F' flag.
    allied = 'T' if allied else 'F'
    # END VALIDATION
    # Per-timestamp, per-attacker damage sums for the requested side.
    swingData = dbSession.query(
        Swing.stime,
        Swing.attackerName,
        func.sum(Swing.damage)
    ).join(Combatant.outSwings).filter(
        Swing.encid == encounterId,
        Swing.swingtype == attackTypeId,
        Combatant.ally == allied
    ).group_by(
        Swing.stime,
        Swing.attackerName
    ).order_by(
        Swing.stime,
        Swing.attackerName
    )
    # Category query: one column per combatant, in a stable order.
    combatants = dbSession.query(Combatant.name).filter(
        Combatant.encid == encounterId,
        Combatant.ally == allied
    ).group_by(
        Combatant.name
    ).order_by(Combatant.name)
    combatantNameList = tuple(n for (n,) in combatants.all())
    # Dead code kept for reference: the earlier raw-SQL crosstab attempt.
    """
    # WARNING: SQL INJECTION RISK FOLLOWS
    # The alternative is roundabout, slow, ugly, and broken.
    qRaw = '''SELECT *
    FROM crosstab('SELECT sw.stime, sw.attacker, SUM(sw.damage)
    FROM swing_table sw
    JOIN combatant_table cm
    ON sw.attacker = cm.name
    AND sw.encid = cm.encid
    WHERE sw.encid =''{0}''
    AND sw.swingtype = {1}
    AND cm.ally = ''{3}''
    GROUP BY sw.stime, sw.attacker
    ORDER BY sw.stime ASC, sw.attacker ASC',
    'SELECT sw.attacker
    FROM swing_table sw
    JOIN combatant_table cm
    ON sw.attacker = cm.name
    AND sw.encid = cm.encid
    WHERE sw.encid=''{0}''
    AND sw.swingtype={1}
    AND cm.ally = ''{3}''
    GROUP BY sw.attacker
    ORDER BY sw.attacker ASC')
    AS (
    stime timestamp,
    {2}
    )''
    asList = ',\n'.join(('"{}" bigint'.format(c) for c in combatantNameList))
    timelineQuery = qRaw.format(encounterId, attackTypeId, asList, allied)
    yield 'Swing Time,' + ','.join(combatantNameList) + '\n'
    yield from (','.join(str('NaN' if v is None else v) for (k, v) in
                row.items())
                + '\n' for row in dbSession.execute(timelineQuery))
    """
    # Pivot: one BigInteger column per combatant, keyed by swing time.
    combatantCols = tuple(Column(cn, BigInteger) for cn in combatantNameList)
    ctFrom = crosstab(swingData,
                      (Column('stime', TIMESTAMP),) + combatantCols,
                      categories=combatants,
                      auto_order=False
                      )
    timelineQuery = dbSession.query(
        Column('stime', TIMESTAMP),
        *combatantCols
    ).select_from(ctFrom)
    yield 'Swing Time,' + ','.join(combatantNameList) + '\n'
    yield from (','.join(str('NaN' if v is None else v) for v in
                row) + '\n' for row in timelineQuery)
| {
"repo_name": "proegssilb/tsw-stats",
"path": "pages/encounterRoutes.py",
"copies": "1",
"size": "7570",
"license": "apache-2.0",
"hash": 2362976590247454700,
"line_mean": 37.040201005,
"line_max": 79,
"alpha_frac": 0.6233817701,
"autogenerated": false,
"ratio": 3.618546845124283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9741928615224282,
"avg_score": 0,
"num_lines": 199
} |
__all__ = ('EncryptedBlockStorage',)
import struct
import hmac
import hashlib
from pyoram.storage.block_storage import (BlockStorageInterface,
BlockStorageTypeFactory)
from pyoram.crypto.aes import AES
import six
class EncryptedBlockStorageInterface(BlockStorageInterface):
    """Abstract interface for block storage with an encryption layer;
    extends BlockStorageInterface with accessors for the encryption key
    and the underlying (raw) storage device."""
    #
    # Abstract Interface
    #
    @property
    def key(self, *args, **kwds):
        # The symmetric encryption key in use.
        raise NotImplementedError                      # pragma: no cover
    @property
    def raw_storage(self, *args, **kwds):
        # The unencrypted storage device beneath the encryption layer.
        raise NotImplementedError                      # pragma: no cover
class EncryptedBlockStorage(EncryptedBlockStorageInterface):
    """Block storage that transparently encrypts/decrypts blocks with
    AES (CTR or GCM mode), layered over any BlockStorageInterface
    device.  The storage header holds a GCM-mode flag plus an HMAC
    binding the block size, block count and header length to the key.
    """

    # Header layout: sha384-digest-size pad bytes followed by one bool
    # (the GCM-mode flag); the pad region carries the HMAC digest.
    _index_struct_string = "!"+("x"*hashlib.sha384().digest_size)+"?"
    _index_offset = struct.calcsize(_index_struct_string)
    # (block_size, block_count, header length) triple fed to the HMAC.
    _verify_struct_string = "!LLL"
    _verify_size = struct.calcsize(_verify_struct_string)

    def __init__(self, storage, **kwds):
        # Required symmetric key; everything else is forwarded to the
        # underlying storage when one must be opened here.
        self._key = kwds.pop('key', None)
        if self._key is None:
            raise ValueError(
                "An encryption key is required using "
                "the 'key' keyword.")
        if isinstance(storage, BlockStorageInterface):
            # Caller owns the device; no other keywords make sense.
            storage_owned = False
            self._storage = storage
            if len(kwds):
                raise ValueError(
                    "Keywords not used when initializing "
                    "with a storage device: %s"
                    % (str(kwds)))
        else:
            storage_owned = True
            storage_type = kwds.pop('storage_type', 'file')
            self._storage = \
                BlockStorageTypeFactory(storage_type)(storage, **kwds)
        try:
            # Decrypt the header, read the mode flag, and verify the
            # HMAC over the storage geometry before trusting anything.
            header_data = AES.GCMDec(self._key,
                                     self._storage.header_data)
            (self._ismodegcm,) = struct.unpack(
                self._index_struct_string,
                header_data[:self._index_offset])
            self._verify_digest = header_data[:hashlib.sha384().digest_size]
            verify = hmac.HMAC(
                key=self.key,
                msg=struct.pack(self._verify_struct_string,
                                self._storage.block_size,
                                self._storage.block_count,
                                len(self._storage.header_data)),
                digestmod=hashlib.sha384)
            if verify.digest() != self._verify_digest:
                raise ValueError(
                    "HMAC of plaintext index data does not match")
            if self._ismodegcm:
                self._encrypt_block_func = AES.GCMEnc
                self._decrypt_block_func = AES.GCMDec
            else:
                self._encrypt_block_func = AES.CTREnc
                self._decrypt_block_func = AES.CTRDec
        except:
            # Only close the device if it was opened here.
            if storage_owned:
                self._storage.close()
            raise

    #
    # Define EncryptedBlockStorageInterface Methods
    #

    @property
    def key(self):
        # The symmetric encryption key in use.
        return self._key
    @property
    def raw_storage(self):
        # The unencrypted storage device beneath the encryption layer.
        return self._storage

    #
    # Define BlockStorageInterface Methods
    #

    def clone_device(self):
        # Independent handle on the same storage with the same key.
        return EncryptedBlockStorage(self._storage.clone_device(),
                                     key=self.key)

    @classmethod
    def compute_storage_size(cls,
                             block_size,
                             block_count,
                             aes_mode='ctr',
                             storage_type='file',
                             ignore_header=False,
                             **kwds):
        """Total on-device bytes for the given geometry; CTR adds one
        AES block of overhead per block, GCM adds two."""
        assert (block_size > 0) and (block_size == int(block_size))
        assert (block_count > 0) and (block_count == int(block_count))
        assert aes_mode in ('ctr', 'gcm')
        if not isinstance(storage_type, BlockStorageInterface):
            storage_type = BlockStorageTypeFactory(storage_type)
        if aes_mode == 'ctr':
            extra_block_data = AES.block_size
        else:
            assert aes_mode == 'gcm'
            extra_block_data = 2 * AES.block_size
        if ignore_header:
            return (extra_block_data * block_count) + \
                   storage_type.compute_storage_size(
                       block_size,
                       block_count,
                       ignore_header=True,
                       **kwds)
        else:
            # 2 * AES.block_size accounts for the GCM-encrypted header.
            return cls._index_offset + \
                   2 * AES.block_size + \
                   (extra_block_data * block_count) + \
                   storage_type.compute_storage_size(
                       block_size,
                       block_count,
                       ignore_header=False,
                       **kwds)

    @classmethod
    def setup(cls,
              storage_name,
              block_size,
              block_count,
              aes_mode='ctr',
              key_size=None,
              key=None,
              storage_type='file',
              initialize=None,
              **kwds):
        """Create and initialize a new encrypted storage, generating a
        key if none is supplied; returns the opened EncryptedBlockStorage."""
        if (key is not None) and (key_size is not None):
            raise ValueError(
                "Only one of 'key' or 'keysize' keywords can "
                "be specified at a time")
        if key is None:
            if key_size is None:
                key_size = 32
            if key_size not in AES.key_sizes:
                raise ValueError(
                    "Invalid key size: %s" % (key_size))
            key = AES.KeyGen(key_size)
        else:
            if len(key) not in AES.key_sizes:
                raise ValueError(
                    "Invalid key size: %s" % (len(key)))
        if (block_size <= 0) or (block_size != int(block_size)):
            raise ValueError(
                "Block size (bytes) must be a positive integer: %s"
                % (block_size))
        ismodegcm = None
        encrypt_block_func = None
        encrypted_block_size = block_size
        if aes_mode == 'ctr':
            ismodegcm = False
            encrypt_block_func = AES.CTREnc
            encrypted_block_size += AES.block_size
        elif aes_mode == 'gcm':
            ismodegcm = True
            encrypt_block_func = AES.GCMEnc
            encrypted_block_size += (2 * AES.block_size)
        else:
            raise ValueError(
                "AES encryption mode must be one of 'ctr' or 'gcm'. "
                "Invalid value: %s" % (aes_mode))
        assert ismodegcm is not None
        assert encrypt_block_func is not None
        if not isinstance(storage_type, BlockStorageInterface):
            storage_type = BlockStorageTypeFactory(storage_type)
        if initialize is None:
            zeros = bytes(bytearray(block_size))
            initialize = lambda i: zeros
        # Wrap the caller's initializer so the raw device only ever sees
        # ciphertext.
        def encrypted_initialize(i):
            return encrypt_block_func(key, initialize(i))
        kwds['initialize'] = encrypted_initialize
        user_header_data = kwds.get('header_data', bytes())
        if type(user_header_data) is not bytes:
            raise TypeError(
                "'header_data' must be of type bytes. "
                "Invalid type: %s" % (type(user_header_data)))
        # we generate the first time simply to
        # compute the length
        tmp = hmac.HMAC(
            key=key,
            msg=struct.pack(cls._verify_struct_string,
                            encrypted_block_size,
                            block_count,
                            0),
            digestmod=hashlib.sha384).digest()
        header_data = bytearray(struct.pack(cls._index_struct_string,
                                            ismodegcm))
        header_data[:hashlib.sha384().digest_size] = tmp
        header_data = header_data + user_header_data
        header_data = AES.GCMEnc(key, bytes(header_data))
        # now that we know the length of the header data
        # being sent to the underlying storage we can
        # compute the real hmac
        verify_digest = hmac.HMAC(
            key=key,
            msg=struct.pack(cls._verify_struct_string,
                            encrypted_block_size,
                            block_count,
                            len(header_data)),
            digestmod=hashlib.sha384).digest()
        header_data = bytearray(struct.pack(cls._index_struct_string,
                                            ismodegcm))
        header_data[:hashlib.sha384().digest_size] = verify_digest
        header_data = header_data + user_header_data
        kwds['header_data'] = AES.GCMEnc(key, bytes(header_data))
        return EncryptedBlockStorage(
            storage_type.setup(storage_name,
                               encrypted_block_size,
                               block_count,
                               **kwds),
            key=key)

    @property
    def header_data(self):
        # User portion of the header (index/HMAC region stripped).
        return AES.GCMDec(self._key,
                          self._storage.header_data)\
                          [self._index_offset:]

    @property
    def block_count(self):
        return self._storage.block_count

    @property
    def block_size(self):
        # Plaintext block size: raw size minus per-block AES overhead.
        if self._ismodegcm:
            return self._storage.block_size - 2 * AES.block_size
        else:
            return self._storage.block_size - AES.block_size

    @property
    def storage_name(self):
        return self._storage.storage_name

    def update_header_data(self, new_header_data):
        # Re-encrypt: keep the existing index/HMAC region, replace only
        # the user portion of the header.
        self._storage.update_header_data(
            AES.GCMEnc(
                self.key,
                AES.GCMDec(self._key,
                           self._storage.header_data)\
                           [:self._index_offset] + \
                new_header_data))

    def close(self):
        self._storage.close()

    def read_block(self, i):
        # Decrypt on the way out.
        return self._decrypt_block_func(
            self._key,
            self._storage.read_block(i))

    def read_blocks(self, indices, *args, **kwds):
        return [self._decrypt_block_func(self._key, b)
                for b in self._storage.read_blocks(indices, *args, **kwds)]

    def yield_blocks(self, indices, *args, **kwds):
        # Streaming variant of read_blocks.
        for b in self._storage.yield_blocks(indices, *args, **kwds):
            yield self._decrypt_block_func(self._key, b)

    def write_block(self, i, block, *args, **kwds):
        # Encrypt on the way in.
        self._storage.write_block(
            i,
            self._encrypt_block_func(self._key, block),
            *args, **kwds)

    def write_blocks(self, indices, blocks, *args, **kwds):
        enc_blocks = []
        for i, b in zip(indices, blocks):
            enc_blocks.append(
                self._encrypt_block_func(self._key, b))
        self._storage.write_blocks(indices, enc_blocks, *args, **kwds)

    @property
    def bytes_sent(self):
        return self._storage.bytes_sent

    @property
    def bytes_received(self):
        return self._storage.bytes_received
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/encrypted_storage/encrypted_block_storage.py",
"copies": "1",
"size": "10855",
"license": "mit",
"hash": -3628606151060027000,
"line_mean": 34.4738562092,
"line_max": 76,
"alpha_frac": 0.5118378627,
"autogenerated": false,
"ratio": 4.391181229773463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010881469077284967,
"num_lines": 306
} |
__all__ = ('EncryptedHeapStorage',)
import struct
from pyoram.util.virtual_heap import SizedVirtualHeap
from pyoram.storage.heap_storage import \
(HeapStorageInterface,
HeapStorage)
from pyoram.encrypted_storage.encrypted_block_storage import \
(EncryptedBlockStorageInterface,
EncryptedBlockStorage)
class EncryptedHeapStorageInterface(HeapStorageInterface):
    """Abstract interface for heap storage with an encryption layer;
    extends HeapStorageInterface with accessors for the encryption key
    and the underlying (raw) storage device."""
    #
    # Abstract Interface
    #
    @property
    def key(self, *args, **kwds):
        # The symmetric encryption key in use.
        raise NotImplementedError                      # pragma: no cover
    @property
    def raw_storage(self, *args, **kwds):
        # The unencrypted storage device beneath the encryption layer.
        raise NotImplementedError                      # pragma: no cover
class EncryptedHeapStorage(HeapStorage,
                           EncryptedHeapStorageInterface):
    """Heap-of-buckets storage layered over an EncryptedBlockStorage
    device: each bucket is one encrypted block holding
    ``blocks_per_bucket`` logical blocks."""

    def __init__(self, storage, **kwds):
        # Accept either an existing encrypted block device or the
        # arguments needed to open one here.
        if isinstance(storage, EncryptedBlockStorageInterface):
            if len(kwds):
                raise ValueError(
                    "Keywords not used when initializing "
                    "with a storage device: %s"
                    % (str(kwds)))
        else:
            storage = EncryptedBlockStorage(storage, **kwds)
        super(EncryptedHeapStorage, self).__init__(storage)

    #
    # Define EncryptedHeapStorageInterface Methods
    #

    @property
    def key(self):
        # Encryption key of the underlying encrypted block storage.
        return self._storage.key

    @property
    def raw_storage(self):
        # The unencrypted device beneath the encryption layer.
        return self._storage.raw_storage

    #
    # Define HeapStorageInterface Methods
    # (override what is defined on HeapStorage)

    def clone_device(self):
        # Independent handle on the same heap with the same key.
        return EncryptedHeapStorage(self._storage.clone_device())

    @classmethod
    def compute_storage_size(cls,
                             block_size,
                             heap_height,
                             blocks_per_bucket=1,
                             heap_base=2,
                             ignore_header=False,
                             **kwds):
        """Total on-device bytes for a heap of the given geometry;
        block_count is derived from the virtual heap, never passed."""
        assert (block_size > 0) and (block_size == int(block_size))
        assert heap_height >= 0
        assert blocks_per_bucket >= 1
        assert heap_base >= 2
        assert 'block_count' not in kwds
        vheap = SizedVirtualHeap(
            heap_base,
            heap_height,
            blocks_per_bucket=blocks_per_bucket)
        if ignore_header:
            return EncryptedBlockStorage.compute_storage_size(
                vheap.blocks_per_bucket * block_size,
                vheap.bucket_count(),
                ignore_header=True,
                **kwds)
        else:
            # _header_offset covers the heap-geometry header prefix.
            return cls._header_offset + \
                   EncryptedBlockStorage.compute_storage_size(
                       vheap.blocks_per_bucket * block_size,
                       vheap.bucket_count(),
                       ignore_header=False,
                       **kwds)

    @classmethod
    def setup(cls,
              storage_name,
              block_size,
              heap_height,
              blocks_per_bucket=1,
              heap_base=2,
              **kwds):
        """Create a new encrypted heap; the heap geometry is packed into
        the header ahead of any user header data."""
        if 'block_count' in kwds:
            raise ValueError("'block_count' keyword is not accepted")
        if heap_height < 0:
            raise ValueError(
                "heap height must be 0 or greater. Invalid value: %s"
                % (heap_height))
        if blocks_per_bucket < 1:
            raise ValueError(
                "blocks_per_bucket must be 1 or greater. "
                "Invalid value: %s" % (blocks_per_bucket))
        if heap_base < 2:
            raise ValueError(
                "heap base must be 2 or greater. Invalid value: %s"
                % (heap_base))
        vheap = SizedVirtualHeap(
            heap_base,
            heap_height,
            blocks_per_bucket=blocks_per_bucket)
        user_header_data = kwds.pop('header_data', bytes())
        if type(user_header_data) is not bytes:
            raise TypeError(
                "'header_data' must be of type bytes. "
                "Invalid type: %s" % (type(user_header_data)))
        # Prepend the heap geometry to the user's header data.
        kwds['header_data'] = \
            struct.pack(cls._header_struct_string,
                        heap_base,
                        heap_height,
                        blocks_per_bucket) + \
            user_header_data
        return EncryptedHeapStorage(
            EncryptedBlockStorage.setup(
                storage_name,
                vheap.blocks_per_bucket * block_size,
                vheap.bucket_count(),
                **kwds))

    # The remaining interface methods are inherited from HeapStorage:
    #@property
    #def header_data(...)
    #@property
    #def bucket_count(...)
    #@property
    #def bucket_size(...)
    #@property
    #def blocks_per_bucket(...)
    #@property
    #def storage_name(...)
    #@property
    #def virtual_heap(...)
    #@property
    #def bucket_storage(...)
    #def update_header_data(...)
    #def close(...)
    #def read_path(...)
    #def write_path(...)
    #@property
    #def bytes_sent(...)
    #@property
    #def bytes_received(...)
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/encrypted_storage/encrypted_heap_storage.py",
"copies": "1",
"size": "4968",
"license": "mit",
"hash": 6830713841002255000,
"line_mean": 27.8837209302,
"line_max": 73,
"alpha_frac": 0.5249597424,
"autogenerated": false,
"ratio": 4.532846715328467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0069976577105593536,
"num_lines": 172
} |
""" allen_data_prep loads data from the Allen emissions data spreadsheet and processes it before saving the
results to a DataFile object. The Allen emissions data are from the following resource:
D. Allen, V. M. Torres, et al. \Measurements of methane emissions at natural
gas production sites in the United States David". In: Proceedings of the National
Academy of Sciences 110.44 (Sept. 2013), pp. 18025{18030. issn: 0027-8424.
doi: 10.1073/pnas.1315099110. url: http://www.pnas.org/cgi/doi/10.
1073/pnas.1315099110
"""
# -------------- reading the csv file --------------
import csv
from InputData.input_data_classes import LeakData
import pickle
# -------------- Hard coded values --------------
# In some cases, a leak would be detected with an FID or IR camera, but no flux could be measured with the HI-FLOW
# sampler. In these cases, the study team assigned a flux of 0.001 cfm to the leak. These data are omitted from FEAST.
cfm_unmeasured_value = 0.001 # cfm
# Number of wells surveyed with an IR camera and FID in the Fort Worth study
n_wells_IR = 292
# Unit conversion from cfm to g/s (assuming standard conditions and pure methane)
cfm_to_gps = 0.0283/60*1e5/8.314/293*16
flux_IR = [] # g/s
counter = 0
flux = 0
with open('InputData/RawData/allen_leakdata_2013.csv') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
source_name = ''
for row in data:
if row[8][0].isdigit():
flux = float(row[8])*cfm_to_gps
if flux > 0:
flux_IR.append(float(row[8])*cfm_to_gps)
notes = \
"""Data extracted from D. Allen, V. M. Torres, et al. \Measurements of methane emissions at natural
gas production sites in the United States David". In: Proceedings of the National
Academy of Sciences 110.44 (Sept. 2013), pp. 18025{18030. issn: 0027-8424.
doi: 10.1073/pnas.1315099110. Flux data are recorded in grams/second."""
allen_leaks = LeakData(notes=notes, raw_file_name='Allen_leakdata_2013.csv', data_prep_file='allen_data_prep.py')
leak_data = {'IR': flux_IR}
well_counts = {'IR': n_wells_IR}
allen_leaks.define_data(leak_data=leak_data, well_counts=well_counts)
pickle.dump(allen_leaks, open('InputData/DataObjectInstances/allen_leaks.p', 'wb'))
| {
"repo_name": "EAOgroup/FEAST",
"path": "InputData/RawDataProcessingScripts/allen_data_prep.py",
"copies": "1",
"size": "2317",
"license": "mit",
"hash": 2467916929316536300,
"line_mean": 47.2978723404,
"line_max": 118,
"alpha_frac": 0.6754423824,
"autogenerated": false,
"ratio": 3.0770252324037184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.923801911027056,
"avg_score": 0.002889700906631683,
"num_lines": 47
} |
__all__ = ["EndOfTraining", "StopCriterion_ABC", "EpochWall", "GeometricEarlyStopping"]
class EndOfTraining(Exception):
    """Raised by a trainer once one of its stop criteria has been satisfied."""

    def __init__(self, stopCriterion):
        # Keep the criterion that fired so callers can inspect it.
        reason = stopCriterion.endMessage()
        self.stopCriterion = stopCriterion
        self.message = "End of training: %s" % reason
class StopCriterion_ABC(object):
    """Interface that every stop criterion must expose.

    The trainer polls :meth:`stop` once per epoch; returning True halts
    training and :meth:`endMessage` explains why.
    """
    def __init__(self, *args, **kwargs):
        # Criteria identify themselves by class name in end messages.
        self.name = self.__class__.__name__
    def stop(self, trainer):
        """Called by the trainer at each epoch. Must be implemented in children.

        BUG FIX: the original raised ``NotImplemented(...)``, which is the
        comparison sentinel (not an exception class) and itself fails with
        a TypeError; ``NotImplementedError`` is the correct exception.
        """
        raise NotImplementedError("Must be implemented in child")
    def endMessage(self):
        """Return a human-readable reason why the training stopped."""
        return self.name
class EpochWall(StopCriterion_ABC):
    """Stops training when ``maxEpochs`` is reached."""
    def __init__(self, maxEpochs):
        StopCriterion_ABC.__init__(self)
        self.maxEpochs = maxEpochs
    def stop(self, trainer):
        # Epochs are 0-indexed in the trainer store, hence the +1.
        return trainer.store["runInfos"]["epoch"] + 1 >= self.maxEpochs
    def endMessage(self):
        """Return a human-readable reason why the training stopped."""
        return "Reached epoch wall %s" % self.maxEpochs
class ScoreWall(StopCriterion_ABC) :
    """Stops training when a given score is reached"""
    def __init__(self, wallValue, datasetMap, outputFunction, outputLayer = None) :
        """if outputLayer is None, will consider the average of all outputs"""
        StopCriterion_ABC.__init__(self)
        self.datasetMap = datasetMap
        # Resolved lazily on the first stop() call (see below).
        self.datasetName = None
        self.outputLayer = outputLayer
        self.outputFunction = outputFunction
        self.wallValue = wallValue
    def stop(self, trainer) :
        """Return True once the watched score drops to wallValue or below.

        NOTE(review): assumes the score decreases during training; for
        scores that increase (e.g. accuracy) this wall fires immediately.
        NOTE: uses dict.iteritems(), so this module is Python-2 only.
        """
        if self.datasetName is None :
            # Reverse-lookup the name under which the trainer registered
            # our dataset map; cached for subsequent epochs.
            found = False
            for name, m in trainer.maps.iteritems() :
                if m is self.datasetMap :
                    self.datasetName = name
                    found = True
                    break
            if not found :
                raise ValueError("the trainer does not know the supplied dataset map")
        if self.outputLayer is None :
            curr = trainer.store["scores"][self.datasetName]["average"][self.outputFunction]
        else :
            curr = trainer.store["scores"][self.datasetName][self.outputLayer.name][self.outputFunction]
        if curr <= self.wallValue :
            return True
        return False
    def endMessage(self) :
        """returns information about the reason why the training stopped"""
        return "Reached score wall %s" % self.wallValue
class GeometricEarlyStopping(StopCriterion_ABC) :
    """Geometrically increases the patiences with the epochs and stops the training when the patience is over."""
    def __init__(self, datasetMap, patience, patienceIncreaseFactor, significantImprovement, outputFunction, descending = True, outputLayer = None) :
        """if outputLayer is None, will consider the average of all outputs.
        :param boolean descending: If true, means that the score should go down during training (most cases). Use false if the opposite is true (ex: accuracy).
        """
        StopCriterion_ABC.__init__(self)
        self.outputLayer = outputLayer
        self.outputFunction = outputFunction
        self.datasetMap = datasetMap
        # Resolved lazily on the first stop() call.
        self.mapName = None
        self.patience = patience
        self.patienceIncreaseFactor = patienceIncreaseFactor
        # wall counts down each epoch; reset on significant improvement.
        self.wall = patience
        self.significantImprovement = significantImprovement
        self.descending = descending
        self.bestScore = None
    def stop(self, trainer) :
        """Return True once the patience counter (wall) has run out.

        NOTE: uses dict.iteritems(), so this module is Python-2 only.
        """
        if self.wall <= 0 :
            return True
        if self.mapName is None :
            # Reverse-lookup the name under which the trainer registered
            # our dataset map; cached for subsequent epochs.
            found = False
            for name, m in trainer.maps.iteritems() :
                if m is self.datasetMap :
                    self.mapName = name
                    found = True
                    break
            if not found :
                raise ValueError("the trainer does not know the supplied dataset map")
        try :
            if self.outputLayer is None :
                curr = trainer.store["scores"][self.mapName]["average"][self.outputFunction]
            else :
                curr = trainer.store["scores"][self.mapName][self.outputLayer.name][self.outputFunction]
            if self.bestScore is None :
                self.bestScore = curr
                self.wall = self.patience
            elif ( self.descending and curr <= (self.bestScore - self.significantImprovement) ) or ( not self.descending and curr >= (self.bestScore + self.significantImprovement) ) :
                # Significant improvement: extend the patience geometrically
                # with the epoch count (never below the initial patience).
                self.bestScore = curr
                self.wall = max(self.patience, trainer.store["runInfos"]["epoch"] * self.patienceIncreaseFactor)
            self.wall -= 1
        except KeyError :
            # Scores may not be populated yet (e.g. first epoch); skip.
            pass
        return False
    def endMessage(self) :
        """returns information about the reason why the training stopped"""
        return "Early stopping, no patience left"
| {
"repo_name": "JonathanSeguin/Mariana",
"path": "Mariana/training/future/stopcriteria.py",
"copies": "2",
"size": "5267",
"license": "apache-2.0",
"hash": -2680357581434028000,
"line_mean": 38.9015151515,
"line_max": 183,
"alpha_frac": 0.6130624644,
"autogenerated": false,
"ratio": 4.317213114754098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5930275579154098,
"avg_score": null,
"num_lines": null
} |
__all__ = ['E']
import operator
import sys
import threading
import numpy
# Declare a double type that does not exist in Python space
double = numpy.double
# The default kind for undeclared variables
default_kind = 'double'
# NOTE: Python-2 module -- `long` below is the Python-2 arbitrary-precision
# integer type (it does not exist in Python 3).
# Maps Python/numpy types to numexpr "kind" names.
type_to_kind = {bool: 'bool', int: 'int', long: 'long', float: 'float',
                double: 'double', complex: 'complex', str: 'str'}
# Inverse of type_to_kind: kind name -> Python/numpy type.
kind_to_type = {'bool': bool, 'int': int, 'long': long, 'float': float,
                'double': double, 'complex': complex, 'str': str}
# Kinds ordered from lowest to highest promotion rank; see commonKind().
kind_rank = ['bool', 'int', 'long', 'float', 'double', 'complex', 'none']
from numexpr import interpreter
class Expression(object):
    """Namespace object whose attribute accesses produce VariableNodes.

    ``E.x`` yields a VariableNode named 'x' with the default kind, which
    lets users build expression trees by writing ordinary Python
    expressions over E's attributes.
    """
    def __init__(self):
        object.__init__(self)
    def __getattr__(self, name):
        if name.startswith('_'):
            # Private names resolve against the instance dict only.
            # NOTE(review): a missing name raises KeyError here rather
            # than the conventional AttributeError.
            return self.__dict__[name]
        else:
            return VariableNode(name, default_kind)
# Singleton expression-builder namespace exported by this module.
E = Expression()
class Context(threading.local):
    """Thread-local evaluation context (e.g. optimization level)."""
    # Guards against threading.local re-running __init__ per thread.
    initialized = False
    def __init__(self, dict_):
        if self.initialized:
            raise SystemError('__init__ called too many times')
        self.initialized = True
        self.__dict__.update(dict_)
    def get(self, value, default):
        """Dict-style lookup with a default."""
        return self.__dict__.get(value, default)
    def get_current_context(self):
        """Return this thread's context as a plain dict."""
        return self.__dict__
    def set_new_context(self, dict_):
        """Merge dict_ into this thread's context."""
        self.__dict__.update(dict_)
# This will be called each time the local object is used in a separate thread
_context = Context({})
def get_optimization():
    # Current optimization level: 'none', 'moderate' or 'aggressive'.
    return _context.get('optimization', 'none')
# helper functions for creating __magic__ methods
def ophelper(f):
    """Decorator for operator implementations: wraps constant scalar
    arguments in ConstantNodes and rejects non-expression operands."""
    def func(*args):
        args = list(args)
        for i, x in enumerate(args):
            if isConstant(x):
                args[i] = x = ConstantNode(x)
            if not isinstance(x, ExpressionNode):
                raise TypeError("unsupported object type: %s" % (type(x),))
        return f(*args)
    # Preserve the wrapped function's metadata (pre-functools.wraps style).
    func.__name__ = f.__name__
    func.__doc__ = f.__doc__
    func.__dict__.update(f.__dict__)
    return func
def allConstantNodes(args):
    "returns True if args are all ConstantNodes."
    for x in args:
        if not isinstance(x, ConstantNode):
            return False
    return True
def isConstant(ex):
    "Returns True if ex is a constant scalar of an allowed type."
    # NOTE: `long` is the Python-2 integer type (Python-2-only module).
    return isinstance(ex, (bool, int, long, float, double, complex, str))
def commonKind(nodes):
    """Return the common (highest-ranked) kind of a list of nodes.

    Strings may only combine with strings; otherwise the result is the
    maximum rank in kind_rank across all node kinds.
    """
    node_kinds = [node.astKind for node in nodes]
    str_count = node_kinds.count('str')
    if 0 < str_count < len(node_kinds):  # some args are strings, but not all
        raise TypeError("strings can only be operated with strings")
    if str_count > 0:  # if there are some, all of them must be
        return 'str'
    n = -1
    for x in nodes:
        n = max(n, kind_rank.index(x.astKind))
    return kind_rank[n]
# 32-bit signed integer bounds, used to classify int constants portably.
max_int32 = 2147483647
min_int32 = -max_int32 - 1
def bestConstantType(x):
    """Return the narrowest Python type suitable for the constant x.

    Implements numexpr's constant-promotion policy; Python-2-only
    (`long`, `except ..., err` syntax).
    """
    if isinstance(x, str):  # ``numpy.string_`` is a subclass of ``str``
        return str
    # ``long`` objects are kept as is to allow the user to force
    # promotion of results by using long constants, e.g. by operating
    # a 32-bit array with a long (64-bit) constant.
    if isinstance(x, (long, numpy.int64)):
        return long
    # ``double`` objects are kept as is to allow the user to force
    # promotion of results by using double constants, e.g. by operating
    # a float (32-bit) array with a double (64-bit) constant.
    if isinstance(x, (double)):
        return double
    # Numeric conversion to boolean values is not tried because
    # ``bool(1) == True`` (same for 0 and False), so 0 and 1 would be
    # interpreted as booleans when ``False`` and ``True`` are already
    # supported.
    if isinstance(x, (bool, numpy.bool_)):
        return bool
    # ``long`` is not explicitly needed since ``int`` automatically
    # returns longs when needed (since Python 2.3).
    # The duality of float and double in Python avoids that we have to list
    # ``double`` too.
    for converter in int, float, complex:
        try:
            y = converter(x)
        except StandardError, err:
            continue
        if x == y:
            # Constants needing more than 32 bits are always
            # considered ``long``, *regardless of the platform*, so we
            # can clearly tell 32- and 64-bit constants apart.
            if converter is int and not (min_int32 <= x <= max_int32):
                return long
            return converter
def getKind(x):
    # Map a constant to its kind name via its best constant type.
    converter = bestConstantType(x)
    return type_to_kind[converter]
def binop(opname, reversed=False, kind=None):
    """Build a binary __magic__ method for ExpressionNode.

    Constant-folds when both operands are constants, otherwise emits an
    OpNode. `reversed` swaps operands (for __r*__ methods); `kind`
    forces the result kind (e.g. 'bool' for comparisons).
    """
    # Getting the named method from self (after reversal) does not
    # always work (e.g. int constants do not have a __lt__ method).
    opfunc = getattr(operator, "__%s__" % opname)
    @ophelper
    def operation(self, other):
        if reversed:
            self, other = other, self
        if allConstantNodes([self, other]):
            return ConstantNode(opfunc(self.value, other.value))
        else:
            return OpNode(opname, (self, other), kind=kind)
    return operation
def func(func, minkind=None, maxkind=None):
    """Wrap a numpy function as an expression-tree builder.

    Constant-folds when possible; otherwise emits a FuncNode whose kind
    follows NumPy casting rules, clamped to [minkind, maxkind].
    """
    @ophelper
    def function(*args):
        if allConstantNodes(args):
            return ConstantNode(func(*[x.value for x in args]))
        kind = commonKind(args)
        if kind in ('int', 'long'):
            # Exception for following NumPy casting rules
            kind = 'double'
        else:
            # Apply regular casting rules
            if minkind and kind_rank.index(minkind) > kind_rank.index(kind):
                kind = minkind
            if maxkind and kind_rank.index(maxkind) < kind_rank.index(kind):
                kind = maxkind
        return FuncNode(func.__name__, args, kind)
    return function
@ophelper
def where_func(a, b, c):
    """Expression builder for where(cond, then, else)."""
    if isinstance(a, ConstantNode):
        # A scalar condition cannot select per element.
        raise ValueError("too many dimensions")
    if allConstantNodes([a,b,c]):
        return ConstantNode(numpy.where(a, b, c))
    return FuncNode('where', [a,b,c])
def encode_axis(axis):
    """Encode a reduction axis as a RawNode for the interpreter.

    None means reduce over all axes. NOTE(review): the negative-axis
    mapping ``maxdims - axis`` adds |axis| to maxdims (axis is negative);
    confirm against the interpreter's axis encoding.
    """
    if isinstance(axis, ConstantNode):
        axis = axis.value
    if axis is None:
        axis = interpreter.allaxes
    else:
        if axis < 0:
            axis = interpreter.maxdims - axis
        if axis > 254:
            raise ValueError("cannot encode axis")
    return RawNode(axis)
def sum_func(a, axis=-1):
    """Expression builder for sum(a, axis)."""
    axis = encode_axis(axis)
    # NOTE(review): sum_func checks ConstantNode BEFORE wrapping scalars,
    # while prod_func below does it AFTER -- so a bare scalar constant is
    # returned early by prod but reduced by sum. Confirm which is intended.
    if isinstance(a, ConstantNode):
        return a
    if isinstance(a, (bool, int, long, float, double, complex)):
        a = ConstantNode(a)
    return FuncNode('sum', [a, axis], kind=a.astKind)
def prod_func(a, axis=-1):
    """Expression builder for prod(a, axis)."""
    axis = encode_axis(axis)
    if isinstance(a, (bool, int, long, float, double, complex)):
        a = ConstantNode(a)
    if isinstance(a, ConstantNode):
        return a
    return FuncNode('prod', [a, axis], kind=a.astKind)
@ophelper
def div_op(a, b):
    """Division; rewrites x / const as x * (1/const) under optimization."""
    if get_optimization() in ('moderate', 'aggressive'):
        if (isinstance(b, ConstantNode) and
            (a.astKind == b.astKind) and
            a.astKind in ('float', 'double', 'complex')):
            return OpNode('mul', [a, ConstantNode(1./b.value)])
    return OpNode('div', [a,b])
@ophelper
def pow_op(a, b):
    """Power; strength-reduces small constant exponents to mul/sqrt chains."""
    if allConstantNodes([a,b]):
        return ConstantNode(a**b)
    if isinstance(b, ConstantNode):
        x = b.value
        if get_optimization() == 'aggressive':
            RANGE = 50 # Approximate break even point with pow(x,y)
            # Optimize all integral and half integral powers in [-RANGE, RANGE]
            # Note: for complex numbers RANGE could be larger.
            if (int(2*x) == 2*x) and (-RANGE <= abs(x) <= RANGE):
                n = int(abs(x))
                ishalfpower = int(abs(2*x)) % 2
                def multiply(x, y):
                    # None acts as the multiplicative identity accumulator.
                    if x is None: return y
                    return OpNode('mul', [x, y])
                # Binary exponentiation: square-and-multiply over the bits of n.
                r = None
                p = a
                mask = 1
                while True:
                    if (n & mask):
                        r = multiply(r, p)
                    mask <<= 1
                    if mask > n:
                        break
                    p = OpNode('mul', [p,p])
                if ishalfpower:
                    # Odd half-integer exponent: multiply in sqrt(a).
                    kind = commonKind([a])
                    if kind in ('int', 'long'): kind = 'double'
                    r = multiply(r, OpNode('sqrt', [a], kind))
                if r is None:
                    # Exponent was 0: result is 1 with a's shape/kind.
                    r = OpNode('ones_like', [a])
                if x < 0:
                    # Negative exponent: invert the accumulated product.
                    r = OpNode('div', [ConstantNode(1), r])
                return r
        if get_optimization() in ('moderate', 'aggressive'):
            if x == -1:
                return OpNode('div', [ConstantNode(1),a])
            if x == 0:
                # NOTE(review): FuncNode here vs OpNode('ones_like') above --
                # confirm whether the asymmetry is intentional.
                return FuncNode('ones_like', [a])
            if x == 0.5:
                kind = a.astKind
                if kind in ('int', 'long'): kind = 'double'
                return FuncNode('sqrt', [a], kind=kind)
            if x == 1:
                return a
            if x == 2:
                return OpNode('mul', [a,a])
    return OpNode('pow', [a,b])
# The functions and the minimum and maximum types accepted
# Registry of expression-level functions exposed by the module; each entry
# wraps a numpy callable (via func()) or a hand-written builder above.
functions = {
    'copy' : func(numpy.copy),
    'ones_like' : func(numpy.ones_like),
    'sqrt' : func(numpy.sqrt, 'float'),
    'sin' : func(numpy.sin, 'float'),
    'cos' : func(numpy.cos, 'float'),
    'tan' : func(numpy.tan, 'float'),
    'arcsin' : func(numpy.arcsin, 'float'),
    'arccos' : func(numpy.arccos, 'float'),
    'arctan' : func(numpy.arctan, 'float'),
    'sinh' : func(numpy.sinh, 'float'),
    'cosh' : func(numpy.cosh, 'float'),
    'tanh' : func(numpy.tanh, 'float'),
    'arcsinh' : func(numpy.arcsinh, 'float'),
    'arccosh' : func(numpy.arccosh, 'float'),
    'arctanh' : func(numpy.arctanh, 'float'),
    'fmod' : func(numpy.fmod, 'float'),
    'arctan2' : func(numpy.arctan2, 'float'),
    'log' : func(numpy.log, 'float'),
    'log1p' : func(numpy.log1p, 'float'),
    'log10' : func(numpy.log10, 'float'),
    'exp' : func(numpy.exp, 'float'),
    'expm1' : func(numpy.expm1, 'float'),
    'abs': func(numpy.absolute, 'float'),
    'where' : where_func,
    'real' : func(numpy.real, 'double', 'double'),
    'imag' : func(numpy.imag, 'double', 'double'),
    'complex' : func(complex, 'complex'),
    'sum' : sum_func,
    'prod' : prod_func,
    }
class ExpressionNode(object):
    """An object that represents a generic number object.

    This implements the number special methods so that we can keep
    track of how this object has been used.
    """
    astType = 'generic'
    def __init__(self, value=None, kind=None, children=None):
        object.__init__(self)
        self.value = value
        if kind is None:
            kind = 'none'
        self.astKind = kind
        if children is None:
            self.children = ()
        else:
            self.children = tuple(children)
    def get_real(self):
        """Real part of the expression (constant-folded when possible)."""
        if self.astType == 'constant':
            return ConstantNode(complex(self.value).real)
        return OpNode('real', (self,), 'double')
    real = property(get_real)
    def get_imag(self):
        """Imaginary part of the expression (constant-folded when possible)."""
        if self.astType == 'constant':
            return ConstantNode(complex(self.value).imag)
        return OpNode('imag', (self,), 'double')
    imag = property(get_imag)
    def __str__(self):
        return '%s(%s, %s, %s)' % (self.__class__.__name__, self.value,
                                   self.astKind, self.children)
    def __repr__(self):
        return self.__str__()
    def __neg__(self):
        return OpNode('neg', (self,))
    def __invert__(self):
        return OpNode('invert', (self,))
    def __pos__(self):
        return self
    # Arithmetic operators: built by binop()/helpers above.
    __add__ = __radd__ = binop('add')
    __sub__ = binop('sub')
    __rsub__ = binop('sub', reversed=True)
    __mul__ = __rmul__ = binop('mul')
    __div__ = div_op
    __rdiv__ = binop('div', reversed=True)
    __pow__ = pow_op
    __rpow__ = binop('pow', reversed=True)
    __mod__ = binop('mod')
    __rmod__ = binop('mod', reversed=True)
    # boolean operations
    __and__ = binop('and', kind='bool')
    __or__ = binop('or', kind='bool')
    __gt__ = binop('gt', kind='bool')
    __ge__ = binop('ge', kind='bool')
    __eq__ = binop('eq', kind='bool')
    __ne__ = binop('ne', kind='bool')
    # lt/le are expressed as reversed gt/ge so only two opcodes are needed.
    __lt__ = binop('gt', reversed=True, kind='bool')
    __le__ = binop('ge', reversed=True, kind='bool')
class LeafNode(ExpressionNode):
    """Marker base for childless nodes (variables and constants)."""
    leafNode = True
class VariableNode(LeafNode):
    """A named input variable in the expression tree."""
    astType = 'variable'
    def __init__(self, value=None, kind=None, children=None):
        # NOTE(review): the `children` parameter is accepted but ignored
        # (leaf nodes have no children); confirm it exists only for
        # signature compatibility with the other node classes.
        LeafNode.__init__(self, value=value, kind=kind)
class RawNode(object):
    """Wrapper used to pass raw integers straight to the interpreter.

    For instance, for selecting what function to use in func1.
    Deliberately not an ExpressionNode subclass: instances are only ever
    walked by the compiler, never operated on as numbers.
    """
    astType = 'raw'
    astKind = 'none'

    def __init__(self, value):
        self.value = value
        self.children = ()

    def __str__(self):
        return 'RawNode(%s)' % (self.value,)

    __repr__ = __str__
class ConstantNode(LeafNode):
    """A constant scalar leaf; its kind is derived from the value's type."""
    astType = 'constant'
    def __init__(self, value=None, children=None):
        kind = getKind(value)
        # Python float constants are double precision by default
        if kind == 'float':
            kind = 'double'
        LeafNode.__init__(self, value=value, kind=kind)
    def __neg__(self):
        # Fold negation at build time instead of emitting an OpNode.
        return ConstantNode(-self.value)
    def __invert__(self):
        # Fold bit inversion at build time instead of emitting an OpNode.
        return ConstantNode(~self.value)
class OpNode(ExpressionNode):
    """An interior node applying opcode to its child expressions."""
    astType = 'op'
    def __init__(self, opcode=None, args=None, kind=None):
        # Default the result kind to the children's common kind.
        if (kind is None) and (args is not None):
            kind = commonKind(args)
        ExpressionNode.__init__(self, value=opcode, kind=kind, children=args)
class FuncNode(OpNode):
    """An OpNode representing a registered function call (see functions)."""
    def __init__(self, opcode=None, args=None, kind=None):
        if (kind is None) and (args is not None):
            kind = commonKind(args)
        OpNode.__init__(self, opcode, args, kind)
| {
"repo_name": "erdc-cm/numexpr",
"path": "numexpr/expressions.py",
"copies": "2",
"size": "14057",
"license": "mit",
"hash": 9185184747742923000,
"line_mean": 32.3895486936,
"line_max": 79,
"alpha_frac": 0.5639183325,
"autogenerated": false,
"ratio": 3.6578194119177727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0070653130987836455,
"num_lines": 421
} |
__all__ = ['Enum', 'WidgetState', 'WidgetAction']
class Enum:
    """Minimal mutable value-holder base for the game 'enum' classes below.

    Subclasses declare their constants as class attributes and set a
    class-level ``value`` as the default.
    """
    # Currently selected value; subclasses override with their default.
    value = None

    def __init__(self, initial=None):
        # BUG FIX: the original unconditionally assigned ``initial`` to
        # self.value, so instantiating a subclass without arguments wiped
        # out its class-level default (e.g. WidgetState.STATE_DEFAULT)
        # with None. Only override when an explicit value is given.
        if initial is not None:
            self.value = initial

    def get(self):
        """Return the currently held value."""
        return self.value

    def set(self, value):
        """Replace the currently held value."""
        self.value = value

    def eq(self, second):
        """Return True when the held value equals *second*."""
        return self.value == second

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        # BUG FIX: the original concatenated self.get() directly, which
        # raises TypeError for the many subclasses whose values are ints
        # or tuples; convert through str() first.
        return "<Enum value=" + str(self.get()) + " >"
#------- GAME ENUMS -------
class GameState(Enum):
    """Top-level state of the game loop."""
    STATE_INITIALIZING = "init"
    STATE_MENU = "menu"
    STATE_LOADING = "..."
    STATE_INGAME = "game"
class CoordinateAxis(Enum):
    """Names for the two map axes."""
    AXIS_X = "x"
    AXIS_Y = "y"
#------- NOTIFICATION ENUMS -------
class MessageImportancy(Enum):
    """Importance level of a notification message (ascending severity)."""
    IMPORTANCY_UNSET = -1
    IMPORTANCY_DEBUG = 0
    IMPORTANCY_LOW = 1
    IMPORTANCY_INFO = 2
    IMPORTANCY_HIGH = 3
    IMPORTANCY_DANGEROUS = 4
class NotificationCategory(Enum):
    """Category tag attached to a notification."""
    NOTI_UNSET = None
    NOTI_UNIT = "unit"
    # NOTE(review): lower-case 'city' breaks the naming convention of the
    # other constants (NOTI_CITY); renaming would change the public API.
    NOTI_city = "city"
    NOTI_INFO = "info"
    NOTI_BATTLE = "battle"
    NOTI_DIPLOMACY = "diplomacy"
#------- WIDGET ENUMS -------
# possible Widget actions
class WidgetAction(Enum):
    """Action a UI widget can trigger."""
    ACTION_NONE = "default"
    ACTION_ATTACK = "attack"
    ACTION_MOVE = "move"
    ACTION_BUILD = "build"
    ACTION_NEXTTURN = "nextturn"
# possible Widget states
class WidgetState(Enum):
    """Visual/interaction state of a UI widget."""
    STATE_DEFAULT = "default"
    STATE_PRESSED = "pressed"
    STATE_DISABLED = "disabled"
    STATE_INVALID = "invalid"
    STATE_INVISIBLE = "invisible"
    # set default
    value = STATE_DEFAULT
#------- PLAYER ENUMS -------
# possible diplomatic player relations
class PlayerRelation(Enum):
    """Diplomatic relation between two players."""
    RELATION_UNSEEN = "unseen"
    RELATION_PEACE = "peace"
    RELATION_NAP = "non-aggression-pact"
    RELATION_NEUTRAL = "neutral"
    RELATION_NEGATIVE = "negative"
    RELATION_WAR = "war"
    RELATION_GENOCIAL = "genocial"
    # set default
    value = RELATION_UNSEEN
# possible player types
class PlayerType(Enum):
    """Who controls a player slot (human, AI difficulty, or open/closed)."""
    PLAYER_HUMAN = "human"
    PLAYER_AI_EASY = "ai:easy"
    PLAYER_AI_MEDIUM = "ai:medium"
    PLAYER_AI_HARD = "ai:hard"
    PLAYER_SLOT_OPEN = "slot:open"
    PLAYER_SLOT_CLOSED = "slot:closed"
#------- MAP ENUMS -------
# possible Tile landscapes
class TileLandscape(Enum):
    """Single-character landscape code of a map tile (as used in map files)."""
    LANDSCAPE_VOID = "X"
    LANDSCAPE_WATER = "W"
    LANDSCAPE_GRASSLANDS = "G"
    LANDSCAPE_FOREST = "F"
    LANDSCAPE_DESERT = "D"
    LANDSCAPE_MOUNTAINS = "M"
    # NOTE(review): value "H" collides with LANDSCAPE_BRIDGE_VERT below,
    # and the double underscore breaks the naming convention -- confirm
    # which code hills are meant to use.
    LANDSCAPE__HILLS = "H"
    LANDSCAPE_SAND = "S"
    LANDSCAPE_RIVER = "R"
    LANDSCAPE_GOLD = "*"
    LANDSCAPE_IRON = "T"
    LANDSCAPE_MARSH = "-"
    LANDSCAPE_BRIDGE_HOR = "="
    LANDSCAPE_BRIDGE_VERT = "H"
# possible Tile biomes
class TileBiome(Enum):
    """Climate zone of a map tile."""
    BIOME_UNSET = -3
    BIOME_ARCTIC = -2
    BIOME_TUNDRA = -1
    BIOME_EUROPEAN = 0
    BIOME_MEDIEVAL = 1
    BIOME_TROPICAL = 2
    BIOME_DRY = 3
    # set default
    value = BIOME_EUROPEAN
class Compass(Enum):
    """Cardinal directions as (dx, dy) tile offsets."""
    DIRECTION_NORTH = (0, -1)
    DIRECTION_EAST = (-1, 0)
    DIRECTION_SOUTH = (0, 1)
    DIRECTION_WEST = (1, 0)
#------- CITY ENUMS -------
# possible City types
class CityType(Enum):
    """Growth stage of a city (ascending size; -1 marks ruins)."""
    CITY_RUINS = -1
    CITY_NONE = 0
    CITY_VILLAGE = 1
    CITY_TOWN = 2
    CITY_CAPITAL = 3
    # set default
    value = 0
# possible Building types
class BuildingType(Enum):
    """Structures that can be built inside a city."""
    BUILDING_NONE = "none"
    BUILDING_WALL = "wall"
    BUILDING_TOWER = "tower"
    BUILDING_CENTER = "towncenter"
    BUILDING_MARKETPLACE = "marketplace"
    BUILDING_STABLES = "stables"
    BUILDING_BORDEL = "bordel"
    BUILDING_BLACKSMITH = "blacksmith"
    BUILDING_HARBOR = "harbor"
    BUILDING_TUNNELS = "tunnels"
    BUILDING_SIEGEWORKSHOP = "siegeworkshop"
    BUILDING_MANSION = "mansion"
    # set default
    value = BUILDING_NONE
#------- UNIT ENUMS -------
# possible unit type
class UnitCategory(Enum):
    """Broad combat category of a unit."""
    UNITCAT_INFANTRY = "INF"
    UNITCAT_HEAVYINFANTRY = "HEAVY_INF"
    UNITCAT_CAVALERY = "CAV"
    UNITCAT_MACHINE = "MACHINE"
    UNITCAT_CIVILIAN = "CIV"
class UnitType(Enum):
    """Concrete unit type within a category."""
    UNITTYPE_SOLIDER = "SOLDIER"
    UNITTYPE_MERCENARY = "MERC"
    UNITTYPE_GUARD = "GUARD"
    UNITTYPE_RIDDEN = "RIDDEN"
    UNITTYPE_KNIGHT = "KNIGHT"
    UNITTYPE_SPY = "SPY"
    UNITTYPE_SIEGE = "SIEGE"
    UNITTYPE_SHIP = "SHIP"
# possible weapon types
class WeaponType(Enum):
    """Weapon class carried by a unit."""
    WEAPON_SWORD = "SWORD"
    WEAPON_SPEAR = "SPEAR"
    WEAPON_BOW = "BOW"
    WEAPON_SIEGE = "SIEGE"
    WEAPON_MAGIC = "MAGIC"
class SkillLevel(Enum):
    """Experience level of a unit (ascending)."""
    SKILL_NOVICE = 0
    SKILL_TRAINED = 1
    SKILL_VETERAN = 2
| {
"repo_name": "herrschr/prey-game",
"path": "pocketthrone/entities/enum.py",
"copies": "2",
"size": "4136",
"license": "bsd-2-clause",
"hash": -3858649575684925000,
"line_mean": 20.6544502618,
"line_max": 49,
"alpha_frac": 0.6774661509,
"autogenerated": false,
"ratio": 2.39768115942029,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.407514731032029,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Environment']
from typing import Dict, Any
import json
class Environment(object):
    """
    Provides a static description of the state of the environment.

    Instances are value objects: hashable, equality-comparable, and
    defensively copied on construction and serialization.
    """
    @staticmethod
    def from_file(fn: str) -> 'Environment':
        """Load an environment description from a JSON file."""
        with open(fn, "r") as f:
            jsn = json.load(f)
        return Environment.from_json(jsn)

    @staticmethod
    def from_json(jsn: Dict[str, Any]) -> 'Environment':
        """Build an environment from its JSON representation."""
        return Environment(jsn['constants'])

    def __init__(self, values: Dict[str, Any]) -> None:
        """
        Constructs a description of a mission environment.

        Parameters:
            values: a dictionary of environment constant values, indexed by
                the name of those constants.
        """
        # Copy defensively so later mutation of the caller's dict cannot
        # change this (hashable) object.
        self.__values = values.copy()

    def __getitem__(self, name: str):
        """
        Retrieves the value of an environmental variable with a given name.
        """
        return self.__values[name]

    def __eq__(self, other: object) -> bool:
        # BUG FIX: comparing against a non-Environment operand used to
        # raise AttributeError; return NotImplemented so Python falls back
        # to the other operand's __eq__ / identity comparison.
        if not isinstance(other, Environment):
            return NotImplemented
        return self.to_json() == other.to_json()

    def __hash__(self) -> int:
        # Sort items by key so the hash is independent of insertion order,
        # consistent with __eq__.
        vals = sorted(self.__values.items(), key=lambda x: x[0])
        return hash(tuple(vals))

    def to_json(self) -> Dict[str, Any]:
        """Return a JSON-serializable representation of this environment."""
        return {'constants': self.__values.copy()}
| {
"repo_name": "squaresLab/Houston",
"path": "houston/environment.py",
"copies": "1",
"size": "1316",
"license": "mit",
"hash": 7599759614270977000,
"line_mean": 27.6086956522,
"line_max": 75,
"alpha_frac": 0.5759878419,
"autogenerated": false,
"ratio": 4.049230769230769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
__all__ = ['EofStream',
'StreamReader', 'DataQueue', 'ChunksQueue',
'FlowControlStreamReader', 'FlowControlDataQueue',
'FlowControlChunksQueue']
import asyncio
import collections
import traceback
from .log import internal_logger
EOF_MARKER = b''
DEFAULT_LIMIT = 2 ** 16
class EofStream(Exception):
    """eof stream indication."""
    # Raised by DataQueue.read() when the queue is exhausted and EOF was fed.
class StreamReader(asyncio.StreamReader):
    """Byte-stream reader with a single pending-read waiter.

    Legacy pre-async/await code: coroutines use @asyncio.coroutine and
    ``yield from``. At most one read coroutine may be waiting at a time
    (enforced by _create_waiter). Reads return EOF_MARKER (b'') at EOF.
    """
    # Running total of bytes ever fed via feed_data().
    total_bytes = 0
    def __init__(self, limit=DEFAULT_LIMIT, loop=None):
        # limit bounds readline() length and chunks unbounded read(-1).
        self._limit = limit
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._buffer = bytearray()
        self._eof = False
        self._waiter = None
        self._eof_waiter = None
        self._exception = None
    def exception(self):
        """Return the stored exception, if any."""
        return self._exception
    def set_exception(self, exc):
        """Store exc and wake the pending reader with it."""
        self._exception = exc
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_exception(exc)
    def feed_eof(self):
        """Mark the stream as ended and wake any pending waiters."""
        self._eof = True
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(True)
        waiter = self._eof_waiter
        if waiter is not None:
            self._eof_waiter = None
            if not waiter.cancelled():
                waiter.set_result(True)
    def is_eof(self):
        """Return True if 'feed_eof' was called."""
        return self._eof
    def at_eof(self):
        """Return True if the buffer is empty and 'feed_eof' was called."""
        return self._eof and not self._buffer
    @asyncio.coroutine
    def wait_eof(self):
        """Block until feed_eof() has been called."""
        if self._eof:
            return
        assert self._eof_waiter is None
        self._eof_waiter = asyncio.Future(loop=self._loop)
        try:
            yield from self._eof_waiter
        finally:
            self._eof_waiter = None
    def feed_data(self, data):
        """Append data to the buffer and wake the pending reader."""
        assert not self._eof, 'feed_data after feed_eof'
        if not data:
            return
        self._buffer.extend(data)
        self.total_bytes += len(data)
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(False)
    def _create_waiter(self, func_name):
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not possible to know
        # which coroutine would get the next data.
        if self._waiter is not None:
            raise RuntimeError('%s() called while another coroutine is '
                               'already waiting for incoming data' % func_name)
        return asyncio.Future(loop=self._loop)
    @asyncio.coroutine
    def readline(self):
        """Read one b'\\n'-terminated line (capped at self._limit bytes)."""
        if self._exception is not None:
            raise self._exception
        line = bytearray()
        not_enough = True
        while not_enough:
            while self._buffer and not_enough:
                ichar = self._buffer.find(b'\n')
                if ichar < 0:
                    # No newline yet: consume the whole buffer and wait.
                    line.extend(self._buffer)
                    self._buffer.clear()
                else:
                    # Include the newline in the returned line.
                    ichar += 1
                    line.extend(self._buffer[:ichar])
                    del self._buffer[:ichar]
                    not_enough = False
                if len(line) > self._limit:
                    raise ValueError('Line is too long')
            if self._eof:
                break
            if not_enough:
                self._waiter = self._create_waiter('readline')
                try:
                    yield from self._waiter
                finally:
                    self._waiter = None
        if line:
            return bytes(line)
        else:
            return EOF_MARKER
    @asyncio.coroutine
    def read(self, n=-1):
        """Read up to n bytes; n < 0 reads everything until EOF."""
        if self._exception is not None:
            raise self._exception
        # migration problem; with DataQueue you have to catch
        # EofStream exception, so common way is to run payload.read() inside
        # infinite loop. what can cause real infinite loop with StreamReader
        # lets keep this code one major release.
        if __debug__:
            if self._eof and not self._buffer:
                self._eof_counter = getattr(self, '_eof_counter', 0) + 1
                if self._eof_counter > 5:
                    stack = traceback.format_stack()
                    internal_logger.warning(
                        'Multiple access to StreamReader in eof state, '
                        'might be infinite loop: \n%s', stack)
        if not n:
            return EOF_MARKER
        if n < 0:
            # This used to just loop creating a new waiter hoping to
            # collect everything in self._buffer, but that would
            # deadlock if the subprocess sends more than self.limit
            # bytes. So just call self.read(self._limit) until EOF.
            blocks = []
            while True:
                block = yield from self.read(self._limit)
                if not block:
                    break
                blocks.append(block)
            data = b''.join(blocks)
            if data:
                return data
            else:
                return EOF_MARKER
        else:
            if not self._buffer and not self._eof:
                self._waiter = self._create_waiter('read')
                try:
                    yield from self._waiter
                finally:
                    self._waiter = None
        if n < 0 or len(self._buffer) <= n:
            data = bytes(self._buffer)
            self._buffer.clear()
        else:
            # n > 0 and len(self._buffer) > n
            data = bytes(self._buffer[:n])
            del self._buffer[:n]
        if data:
            return data
        else:
            return EOF_MARKER
    @asyncio.coroutine
    def readany(self):
        """Return whatever is buffered, waiting for at least one chunk."""
        if self._exception is not None:
            raise self._exception
        if not self._buffer and not self._eof:
            self._waiter = self._create_waiter('readany')
            try:
                yield from self._waiter
            finally:
                self._waiter = None
        data = bytes(self._buffer)
        del self._buffer[:]
        if data:
            return data
        else:
            return EOF_MARKER
    @asyncio.coroutine
    def readexactly(self, n):
        """Read exactly n bytes or raise IncompleteReadError at EOF."""
        if self._exception is not None:
            raise self._exception
        # There used to be "optimized" code here. It created its own
        # Future and waited until self._buffer had at least the n
        # bytes, then called read(n). Unfortunately, this could pause
        # the transport if the argument was larger than the pause
        # limit (which is twice self._limit). So now we just read()
        # into a local buffer.
        blocks = []
        while n > 0:
            block = yield from self.read(n)
            if not block:
                partial = b''.join(blocks)
                raise asyncio.streams.IncompleteReadError(
                    partial, len(partial) + n)
            blocks.append(block)
            n -= len(block)
        return b''.join(blocks)
    def read_nowait(self):
        """Synchronously drain and return the buffer (no waiting allowed)."""
        if self._exception is not None:
            raise self._exception
        if self._waiter and not self._waiter.done():
            raise RuntimeError(
                'Called while some coroutine is waiting for incoming data.')
        if not self._buffer:
            return EOF_MARKER
        else:
            data = bytes(self._buffer)
            del self._buffer[:]
            return data
class FlowControlStreamReader(StreamReader):
    """StreamReader that toggles backpressure on an underlying stream.

    The transport is resumed while a read is in progress (so data can
    flow in) and paused again once the read completes.
    """
    def __init__(self, stream, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Object exposing resume_stream()/pause_stream().
        self._stream = stream
    @asyncio.coroutine
    def read(self, n=-1):
        self._stream.resume_stream()
        try:
            return (yield from super().read(n))
        finally:
            self._stream.pause_stream()
    @asyncio.coroutine
    def readline(self):
        self._stream.resume_stream()
        try:
            return (yield from super().readline())
        finally:
            self._stream.pause_stream()
    @asyncio.coroutine
    def readany(self):
        self._stream.resume_stream()
        try:
            return (yield from super().readany())
        finally:
            self._stream.pause_stream()
    @asyncio.coroutine
    def readexactly(self, n):
        self._stream.resume_stream()
        try:
            return (yield from super().readexactly(n))
        finally:
            self._stream.pause_stream()
class DataQueue:
    """DataQueue is a general-purpose blocking queue with one reader."""

    def __init__(self, *, loop=None):
        self._loop = loop
        self._buffer = collections.deque()
        self._eof = False
        self._waiter = None
        self._exception = None

    def is_eof(self):
        """True once feed_eof() has been called."""
        return self._eof

    def at_eof(self):
        """True when EOF was signalled and the buffer is fully drained."""
        return self._eof and not self._buffer

    def exception(self):
        return self._exception

    def set_exception(self, exc):
        """Record *exc* and propagate it to a parked reader, if any."""
        self._exception = exc
        pending, self._waiter = self._waiter, None
        if pending is not None and not pending.done():
            pending.set_exception(exc)

    def feed_data(self, data):
        """Enqueue one item and wake the reader."""
        self._buffer.append(data)
        pending, self._waiter = self._waiter, None
        if pending is not None and not pending.cancelled():
            pending.set_result(True)

    def feed_eof(self):
        """Mark the stream as finished and wake the reader."""
        self._eof = True
        pending, self._waiter = self._waiter, None
        if pending is not None and not pending.cancelled():
            pending.set_result(False)

    @asyncio.coroutine
    def read(self):
        """Return the next item; raise EofStream once the queue is exhausted."""
        if not self._buffer and not self._eof:
            if self._exception is not None:
                raise self._exception
            # Single-reader invariant: nobody else may already be waiting.
            assert not self._waiter
            self._waiter = asyncio.Future(loop=self._loop)
            yield from self._waiter
        if self._buffer:
            return self._buffer.popleft()
        if self._exception is not None:
            raise self._exception
        raise EofStream
class FlowControlDataQueue(DataQueue):
    """DataQueue that resumes/pauses an underlying stream around reads.

    It is a destination for parsed data.
    """

    def __init__(self, stream, *, loop=None):
        super().__init__(loop=loop)
        self._stream = stream

    @asyncio.coroutine
    def read(self):
        self._stream.resume_stream()
        try:
            item = yield from super().read()
        finally:
            self._stream.pause_stream()
        return item
class ChunksQueue(DataQueue):
    """Like a :class:`DataQueue`, but for binary chunked data transfer."""

    @asyncio.coroutine
    def read(self):
        # Translate end-of-stream into the EOF sentinel instead of raising.
        try:
            chunk = yield from super().read()
        except EofStream:
            return EOF_MARKER
        return chunk

    readany = read
class FlowControlChunksQueue(FlowControlDataQueue, ChunksQueue):
    """FlowControlChunksQueue resumes and pauses an underlying stream."""
    # Rebind readany to the flow-controlled FlowControlDataQueue.read,
    # overriding the readany (= ChunksQueue.read) inherited from ChunksQueue.
    readany = FlowControlDataQueue.read
| {
"repo_name": "saghul/aiohttp",
"path": "aiohttp/streams.py",
"copies": "1",
"size": "11487",
"license": "apache-2.0",
"hash": -6453895161939138000,
"line_mean": 27.5037220844,
"line_max": 79,
"alpha_frac": 0.5366065988,
"autogenerated": false,
"ratio": 4.504705882352941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 403
} |
# Public API of this module.  Fix: NotFound is defined below and registered
# in ERRORS (404) but was missing here, so `from ... import *` omitted it.
__all__ = ["error_from_response", "parse_error", "Error", "ClientError",
           "ServerError", "BadRequest", "Unauthorized", "NotFound",
           "InternalServerError", "BadGateway", "ServiceUnavailable",
           "GatewayTimeout"]
def error_from_response(response_body, status_code):
    """Map an HTTP response to the matching Error instance.

    Returns None when status_code has no entry in ERRORS (i.e. is not an
    error this module models).
    """
    try:
        klass = ERRORS[status_code]
    except KeyError:
        return None
    return klass(parse_error(response_body), status_code)
def parse_error(body):
    """Pull the error message out of a parsed response body.

    Returns "" for a missing or empty body; otherwise reads
    body.error.message (body is assumed to be an iterable object exposing
    an `error` attribute — TODO confirm against callers).
    """
    if body is None:
        return ""
    if not any(body):
        return ""
    return body.error.message
class Error(Exception):
    """Base class for API errors; remembers the HTTP status code."""

    def __init__(self, message='', code=None):
        # Stash the status code before delegating the message to Exception.
        self.status_code = code
        super(Error, self).__init__(message)
# --- 4xx client-side errors ---------------------------------------------

class ClientError(Error):
    """Raised when AcceptOn returns a 4xx HTTP status code"""


class BadRequest(ClientError):
    """Raised when AcceptOn returns a 400 HTTP status code"""


class Unauthorized(ClientError):
    """Raised when AcceptOn returns a 401 HTTP status code"""


class NotFound(ClientError):
    """Raised when AcceptOn returns a 404 HTTP status code"""


# --- 5xx server-side errors ---------------------------------------------

class ServerError(Error):
    """Raised when AcceptOn returns a 5xx HTTP status code"""


class InternalServerError(ServerError):
    """Raised when AcceptOn returns a 500 HTTP status code"""


class BadGateway(ServerError):
    """Raised when AcceptOn returns a 502 HTTP status code"""


class ServiceUnavailable(ServerError):
    """Raised when AcceptOn returns a 503 HTTP status code"""


class GatewayTimeout(ServerError):
    """Raised when AcceptOn returns a 504 HTTP status code"""
# Maps HTTP status codes to the exception class raised for that code;
# consumed by error_from_response() above.
ERRORS = {
    400: BadRequest,
    401: Unauthorized,
    404: NotFound,
    500: InternalServerError,
    502: BadGateway,
    503: ServiceUnavailable,
    504: GatewayTimeout
}
| {
"repo_name": "accepton/accepton-python",
"path": "accepton/error.py",
"copies": "1",
"size": "1761",
"license": "mit",
"hash": 9047109225797212000,
"line_mean": 23.4583333333,
"line_max": 78,
"alpha_frac": 0.6871095968,
"autogenerated": false,
"ratio": 4.153301886792453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5340411483592452,
"avg_score": null,
"num_lines": null
} |
# all error responses, keyed by numeric error code.
# Messages may contain %-style placeholders that throw() fills in from its
# dataStruct argument.  NOTE(review): codes are not contiguous or ordered
# (10, 21, 22, 14, 15, 16, 17, 24) — presumably assigned historically.
__ERROR__RESPONSES__ = {
    0 : "Access Denied",
    1 : "Parameter Missing: Function '%s'; Parameter '%s'",
    2: "User '%s' Does Not Exist",
    3: "Device '%s' Does Not Exist",
    4: "Incorrect Connection Pin",
    5: "Device Is Already Connected To A Host: Disconnect The Device And Try Connecting Again",
    10 : "Function '%s' requires '%s' arguments; Not '%s' arguments",
    21 : "Casting Error",
    22 : "Incorrect Parameter Type: Regex does not match parameter '%s'",
    14 : "API Handler Map Dictionary '%s' Does Not contain Method '%s'",
    15 : "API Handler Map Does Not Contain Dictionary '%s'",
    16 : "GET API Requests Must Use The 'get' Dictionary In The Permissions Map",
    17 : "A POST API Request May Not Use The 'get' Dictionary",
    24 : "API Post Payload is Missing a Required Field"
}
# throw error response
def throw(code, dataStruct=(), compiled=False):
    """Build a failure response for the given error code.

    dataStruct supplies %-interpolation values for the message template;
    compiled=True returns the JSON string instead of the dict.
    """
    payload = {
        'stat': 'fail',
        'code': code,
        'message': __ERROR__RESPONSES__[code] % dataStruct
    }
    # Optionally serialize via this module's compile() helper.
    return compile(payload) if compiled else payload
# returns a successful response
def reply(data=None):
    """Stamp a response payload with stat='ok' and return it.

    Fix: the original signature `reply(data={})` used a mutable default, so
    every no-argument call shared (and returned) the same dict object.  A
    None sentinel now yields a fresh dict per call; callers that pass their
    own dict see identical behavior.
    """
    if data is None:
        data = {}
    data['stat'] = 'ok'
    return data
# compiles response for output
def compile(JSON):
    """Serialize a response dict into a JSON string.

    NOTE(review): this module-level name shadows the builtin compile()
    within this module.
    """
    import json
    serialized = json.dumps(JSON)
    return serialized
"repo_name": "HunterLarco/Jiro",
"path": "devserver/Jiro/API/response.py",
"copies": "1",
"size": "1438",
"license": "apache-2.0",
"hash": 3233852983304167400,
"line_mean": 32.4651162791,
"line_max": 158,
"alpha_frac": 0.6794158554,
"autogenerated": false,
"ratio": 3.7253886010362693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9794813607375938,
"avg_score": 0.021998169812066024,
"num_lines": 43
} |
# All error types described in DB API 2 are implemented the same way as in
# Django (1.11 to 3.0)., otherwise some exceptions are not correctly reported in it.
from importlib import import_module
from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Type, Union
import json
import warnings
import requests # noqa
# === Forward defs (they are first due to dependency)
class FakeReq:
    """A Fake Request is used for compatible error reporting in "composite" subrequests."""

    def __init__(self,
                 method: str,
                 url: str,
                 data: str,  # ?? Union[str, List[Any], Dict[str, Any]],
                 headers: Optional[Dict[str, str]] = None,
                 context: Optional[Dict[Any, Any]] = None
                 ) -> None:
        self.method = method
        self.url = url
        self.data = data
        # Falsy headers/context collapse to fresh empty dicts.
        self.headers = headers if headers else {}  # type: Dict[str, str]
        self.context = context if context else {}  # type: Dict[Any, Any]  # the key is Union[str, int]

    @property
    def body(self) -> str:
        """The request payload as a string (non-strings are JSON-encoded)."""
        if isinstance(self.data, str):
            return self.data
        return json.dumps(self.data)
class FakeResp:  # pylint:disable=too-many-instance-attributes
    """A Fake Response is used for compatible error reporting in "composite" subrequests."""

    def __init__(self, status_code: int, headers: Mapping[str, str], text: str, request: FakeReq) -> None:
        # reason is always None for fake responses (unlike requests.Response).
        self.reason = None
        self.status_code = status_code
        self.headers = headers
        self.text = text
        self.request = request
# Response type accepted by the error helpers below.  NOTE(review): the
# trailing comment suggests FakeResp was once meant to be part of this
# alias as well — confirm before relying on it.
GenResponse = requests.Response  # (requests.Response, 'FakeResp')
# === Exception defs
class SalesforceWarning(Warning):
    """Warning carrying details extracted from an SFDC REST response."""

    def __init__(self,
                 messages: Optional[Union[str, List[str]]] = None,
                 response: Optional[GenResponse] = None,
                 verbs: Optional[Iterable[str]] = None
                 ) -> None:
        # prepare_exception() composes the message and, as a side effect,
        # fills in the three attributes initialized below.
        self.data = []  # type: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]]
        self.response = None  # type: Optional[GenResponse]
        self.verbs = None  # type: Optional[Set[str]]
        super().__init__(prepare_exception(self, messages, response, verbs))
class Error(Exception):
    """Database error enriched with details from a SF REST API response.

    Carries just enough context from the response — not too much, not too
    little.
    """

    def __init__(self,
                 messages: Optional[Union[str, List[str]]] = None,
                 response: Optional[GenResponse] = None,
                 verbs: Optional[Iterable[str]] = None
                 ) -> None:
        # prepare_exception() composes the message and, as a side effect,
        # fills in the three attributes initialized below.
        self.data = []  # type: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]]
        self.response = None  # type: Optional[GenResponse]
        self.verbs = None  # type: Optional[Set[str]]
        super().__init__(prepare_exception(self, messages, response, verbs))
# DB API 2 (PEP 249) exception hierarchy, implemented the same way as in
# Django so exceptions are reported correctly by it (see module docstring).

class InterfaceError(Error):
    pass  # should be raised directly


class DatabaseError(Error):
    pass


class SalesforceError(DatabaseError):
    """Error reported by SFDC data instance in a REST API request.

    This class is for messages with ExceptionCode that can be searched in
    "SOAP API documentation"
    https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/sforce_api_calls_concepts_core_data_objects.htm#exception_code_topic
    by the capitalized ExceptionCode: e.g. "DUPLICATE_VALUE \n some context about it"
    Subclasses are for messages created by django-salesforce.
    Timeout is also reported as a SalesforceError, because it can be frequently caused
    by SFDC by a slow query. There is no ambiguity.
    """


class DataError(SalesforceError):
    pass


class OperationalError(SalesforceError):
    pass  # e.g. network, auth


class IntegrityError(SalesforceError):
    pass  # e.g. foreign key (probably recently deleted obj)


class InternalError(SalesforceError):
    pass


class ProgrammingError(SalesforceError):
    pass  # e.g sql syntax


class NotSupportedError(SalesforceError):
    pass


class SalesforceAuthError(SalesforceError):
    """Error reported by SFDC in salesforce.auth at login request.

    The messages are typically very cryptic. (probably intentionally,
    to not disclosure more information to unathorized persons)
    Repeated errors of this class can lock the user account temporarily.
    """
def prepare_exception(obj: Union[Error, SalesforceWarning],
                      messages: Optional[Union[str, List[str]]] = None,
                      response: Optional[GenResponse] = None,
                      verbs: Optional[Iterable[str]] = None
                      ) -> str:
    """Prepare exception params or only an exception message

    parameters:
        messages: list of strings, that will be separated by new line
        response: response from a request to SFDC REST API
        verbs: list of options about verbosity
    """
    # pylint:disable=too-many-branches
    verbs_ = set(verbs or [])
    known_options = ['method+url']
    # Normalize `messages` into a list of strings.
    if messages is None:
        messages = []
    if isinstance(messages, str):
        messages = [messages]
    assert isinstance(messages, list)
    assert not verbs_.difference(known_options)
    data = None
    # a boolean from a failed response is False, though error messages in json should be decoded
    if response is not None and 'json' in response.headers.get('Content-Type', '') and response.text:
        data = json.loads(response.text)
        if data:
            # Only the first error entry is expanded in detail.
            data_0 = data[0]
            if 'errorCode' in data_0:
                # Composite sub-requests carry a referenceId identifying them.
                subreq = ''
                if 'referenceId' in data_0:
                    subreq = " (in subrequest {!r})".format(data_0['referenceId'])
                messages = [data_0['errorCode'] + subreq] + messages
            if data_0.get('fields'):
                messages.append('FIELDS: {}'.format(data_0['fields']))
            if len(data) > 1:
                messages.append('MORE_ERRORS ({})'.format(len(data)))
    if 'method+url' in verbs_:
        # Optional verbosity: append request method and (truncated) URL.
        assert response is not None and response.request.url
        method = response.request.method
        url = response.request.url
        if len(url) > 100:
            url = url[:100] + '...'
        data_info = ''
        if (method in ('POST', 'PATCH') and
                (not response.request.body or 'json' not in response.request.headers.get('content-type', ''))):
            data_info = ' (without json request data)'
        messages.append('in {} "{}"{}'.format(method, url, data_info))
    # Join the parts on newline + indent; inner newlines get the same indent.
    separ = '\n '
    messages = [x.replace('\n', separ) for x in messages]
    message = separ.join(messages)
    if obj:
        # Side effect: stash the parsed details on the exception/warning.
        obj.data = data
        obj.response = response
        obj.verbs = verbs_
    return message
def warn_sf(messages: Union[str, List[str]],
            response: Optional[GenResponse],
            verbs: Optional[Iterable[str]] = None,
            klass: Type[SalesforceWarning] = SalesforceWarning
            ) -> None:
    """Issue a warning SalesforceWarning, with message combined from message and data from SFDC response"""
    # stacklevel=2 attributes the warning to warn_sf's caller.
    warning = klass(messages, response, verbs)
    warnings.warn(warning, stacklevel=2)
def import_string(dotted_path: str) -> Any:
    # copied from django.utils.module_loading
    """Import *dotted_path* and return the attribute/class it names.

    The path is split at its last dot into a module path and an attribute
    name.  Raises ImportError (chained to the underlying error) when the
    path has no dot, the module cannot be imported, or the attribute is
    missing.
    """
    try:
        mod_path, attr_name = dotted_path.rsplit('.', 1)
    except ValueError as err:
        raise ImportError("%s doesn't look like a module path" % dotted_path) from err

    module = import_module(mod_path)
    try:
        return getattr(module, attr_name)
    except AttributeError as err:
        raise ImportError(
            'Module "%s" does not define a "%s" attribute/class' % (mod_path, attr_name)
        ) from err
| {
"repo_name": "hynekcer/django-salesforce",
"path": "salesforce/dbapi/exceptions.py",
"copies": "2",
"size": "7963",
"license": "mit",
"hash": -1380240578440833500,
"line_mean": 34.5491071429,
"line_max": 135,
"alpha_frac": 0.6250156976,
"autogenerated": false,
"ratio": 4.062755102040816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5687770799640816,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.