id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/DearPyGui_DragAndDrop-1.0.0-py3-none-any.whl/DearPyGui_DragAndDrop/main.py | from __future__ import annotations
import traceback
from enum import IntEnum
from pathlib import Path
from typing import Type, TypeVar, Dict, Callable, Any, Union, List
import dearpygui.dearpygui as dpg
DragAndDropDataObject = Union[None, str, List[Path]]
SubscriptionTag = TypeVar('SubscriptionTag', bound=int)
class KEYSTATE(IntEnum):
    """Mouse-button / modifier-key state flags accompanying drag events.

    Values are distinct powers of two so several states can be OR-combined
    into a single integer; presumably they mirror the Win32 MK_* constants
    -- TODO confirm against the event source that produces them.
    """
    LEFT = 1
    RIGHT = 2
    SHIFT = 4
    CTRL = 8
    MIDDLE = 16
    ALT = 32
class DROPEFFECT(IntEnum):
    """Effect to report for a drop operation.

    NOTE(review): values look like the OLE DROPEFFECT_NONE/COPY/MOVE
    constants -- confirm against the native drop-target implementation.
    """
    NONE = 0
    COPY = 1
    MOVE = 2
# Module-level current drop effect; defaults to MOVE.
_now_drop_effect: DROPEFFECT = DROPEFFECT.MOVE


def set_drop_effect(effect: DROPEFFECT = DROPEFFECT.NONE):
    """Set the drop effect reported for the drag operation in progress."""
    global _now_drop_effect
    _now_drop_effect = effect


def get_drop_effect() -> DROPEFFECT:
    """Return the currently configured drop effect."""
    return _now_drop_effect
class DragAndDrop:
    """Base class for drag-and-drop event subscribers.

    Instantiating a (sub)class registers the instance in a class-level
    registry keyed by a dearpygui-generated uuid; the ``_DragEnter`` /
    ``_DragOver`` / ``_DragLeave`` / ``_Drop`` classmethods fan the native
    events out to every registered instance, printing (but otherwise
    swallowing) per-subscriber exceptions so one bad handler cannot break
    the others.
    """

    # Registry of live subscriber *instances*, keyed by subscription tag.
    # (Fixed annotation: values are instances, not classes.)
    __subscribers: Dict[SubscriptionTag, DragAndDrop] = {}
    __subscription_tag: SubscriptionTag = None

    def DragEnter(self, dataObject: DragAndDropDataObject, keyState: list[KEYSTATE]):
        ...

    def DragOver(self, keyState: list[KEYSTATE]):
        ...

    def DragLeave(self):
        ...

    def Drop(self, dataObject: DragAndDropDataObject, keyState: list[KEYSTATE]):
        ...

    def __init__(self):
        # Re-initializing an already-registered instance replaces its entry.
        if self.__subscription_tag:
            self._unsubscribe(self.__subscription_tag)
        self.__subscription_tag = self._subscribe(self)  # noqa

    def __del__(self):
        if self.__subscription_tag:
            self._unsubscribe(self.__subscription_tag)

    @classmethod
    def _subscribe(cls, instance: DragAndDrop) -> SubscriptionTag:
        """Register *instance* and return its freshly generated tag."""
        subscription_tag = dpg.generate_uuid()
        cls.__subscribers[subscription_tag] = instance
        return subscription_tag

    @classmethod
    def _unsubscribe(cls, subscription_tag: SubscriptionTag):
        """Drop the subscriber registered under *subscription_tag*, if any.

        Bug fix: this classmethod previously named its first parameter
        ``self``, which is misleading -- it always receives the class.
        """
        if subscription_tag in cls.__subscribers:
            del cls.__subscribers[subscription_tag]

    @classmethod
    def _DragEnter(cls, dataObject, keyState):
        """Dispatch a DragEnter event to every subscriber."""
        for subscriber in cls.__subscribers.values():
            try:
                subscriber.DragEnter(dataObject, keyState)  # noqa
            except Exception:
                traceback.print_exc()

    @classmethod
    def _DragOver(cls, keyState):
        """Dispatch a DragOver event to every subscriber."""
        for subscriber in cls.__subscribers.values():
            try:
                subscriber.DragOver(keyState)  # noqa
            except Exception:
                traceback.print_exc()

    @classmethod
    def _DragLeave(cls):
        """Dispatch a DragLeave event to every subscriber."""
        for subscriber in cls.__subscribers.values():
            try:
                subscriber.DragLeave()  # noqa
            except Exception:
                traceback.print_exc()

    @classmethod
    def _Drop(cls, dataObject, keyState):
        """Dispatch a Drop event to every subscriber."""
        for subscriber in cls.__subscribers.values():
            try:
                subscriber.Drop(dataObject, keyState)  # noqa
            except Exception:
                traceback.print_exc()
class _DragAndDropForFunctions(DragAndDrop):
    """Internal subscriber whose handler methods are replaced at runtime by
    the module-level ``set_drag_enter`` / ``set_drag_over`` /
    ``set_drag_leave`` / ``set_drop`` functions, giving a function-based API
    on top of the class-based one."""
    def DragEnter(self, dataObject, keyState):
        ...

    def DragOver(self, keyState):
        ...

    def DragLeave(self):
        ...

    def Drop(self, dataObject, keyState):
        ...
def _noop(*_args, **_kwargs):
    """Placeholder handler installed whenever a callback is cleared."""
    return ...


def set_drag_enter(function: Callable[[DragAndDropDataObject, list[KEYSTATE]], Any] = None):
    """Install *function* as the DragEnter handler; ``None`` clears it."""
    _DragAndDropForFunctions.DragEnter = _noop if function is None else function


def set_drag_over(function: Callable[[list[KEYSTATE]], Any] = None):
    """Install *function* as the DragOver handler; ``None`` clears it."""
    _DragAndDropForFunctions.DragOver = _noop if function is None else function


def set_drag_leave(function: Callable[[], Any] = None):
    """Install *function* as the DragLeave handler; ``None`` clears it."""
    _DragAndDropForFunctions.DragLeave = _noop if function is None else function


def set_drop(function: Callable[[DragAndDropDataObject, list[KEYSTATE]], Any] = None):
    """Install *function* as the Drop handler; ``None`` clears it."""
    _DragAndDropForFunctions.Drop = _noop if function is None else function
/Cray-0.0.10.tar.gz/Cray-0.0.10/cray/jobs.py | import boto3
import cray.config as config
import cray.s3 as cs3
import subprocess
from os import getcwd
from shutil import copyfile
from pathlib import Path
def get_jobs(job_prefix=None):
    """Yield job identifiers stored under the configured S3 job prefix.

    Bug fix: the previous implementation removed the prefix with
    ``str.lstrip(prefix)``, but ``lstrip`` strips any leading run of
    characters *from the given set*, not the literal prefix -- so a job id
    starting with a character that also appears in the prefix would be
    truncated. Slice the literal prefix off instead.
    """
    root = config.job_prefix() + "/"
    prefix = root
    if job_prefix is not None:
        prefix = prefix + job_prefix
    return (
        (j[len(root):] if j.startswith(root) else j)
        for j in cs3.list_subdirectories(config.bucket(), prefix)
    )
def job_file_prefix(jobID, file):
    """Return the S3 key ``<job_prefix>/<jobID>/<file>``."""
    return "/".join((config.job_prefix(), jobID, file))


def get_ticket_jobs(ticket):
    """List jobs filed under *ticket* (used as a job-id prefix)."""
    return get_jobs(ticket)
def get_job_config(job):
    """Load and return the job's ``config.json`` from the S3 bucket."""
    return cs3.load_json(
        config.bucket(), "{}/{}/config.json".format(config.job_prefix(), job)
    )


def is_job_cancelled(jobID):
    """True if the cancelled-marker archive exists for *jobID*."""
    return cs3.file_exists(config.bucket(), job_file_prefix(jobID, "job_cancelled.zip"))


def is_job_active(jobID):
    """True if the active job archive exists for *jobID*."""
    return cs3.file_exists(config.bucket(), job_file_prefix(jobID, "job.zip"))


def exists(jobID):
    """True if *jobID* exists at all, whether active or cancelled."""
    return is_job_active(jobID) or is_job_cancelled(jobID)
def cancel_job(jobID):
    """Mark a job cancelled by moving job.zip to job_cancelled.zip in S3."""
    s3 = boto3.resource("s3")
    active_key = job_file_prefix(jobID, "job.zip")
    cancelled_key = job_file_prefix(jobID, "job_cancelled.zip")
    # S3 has no rename: copy to the cancelled key, then delete the original.
    source = "{}/{}".format(config.bucket(), active_key)
    s3.Object(config.bucket(), cancelled_key).copy_from(CopySource=source)
    s3.Object(config.bucket(), active_key).delete()
def build_job_archive(
    tempdir, config_path, ssh_dir, fix_windows_path, docker_image, environment
):
    """Run *docker_image* to build a job archive into *tempdir*.

    The current working directory is mounted as /build and *tempdir* as
    /output; *ssh_dir* (if non-empty) is mounted read-only as /root/.ssh.
    *environment* is an iterable of KEY=VALUE strings passed as ``-e`` args.
    Raises Exception if the container exits non-zero.
    """
    pwd = getcwd()
    if fix_windows_path:
        # Convert Windows paths to docker-toolbox style POSIX paths.
        # NOTE(review): only the C: drive is handled -- paths on other
        # drive letters pass through unchanged. Confirm this is intended.
        tempdir = tempdir.replace("\\", "/").replace("C:", "/c")
        pwd = pwd.replace("\\", "/").replace("C:", "/c")
        ssh_dir = ssh_dir.replace("\\", "/").replace("C:", "/c")
    if config_path != "":
        if Path(config_path).exists():
            # Overwrites any existing config.json in the working directory
            # so the container sees it under /build.
            copyfile(config_path, f"{pwd}/config.json")
        else:
            raise FileNotFoundError(f"No such configuration file: {config_path}")
    ssh_binding = ""
    if ssh_dir != "":
        ssh_binding = "-v {}:/root/.ssh:ro".format(ssh_dir)
    if environment:
        env_args = " ".join(f"-e {ei}" for ei in environment)
    else:
        env_args = ""
    cmd = "docker run -v {}:/build {} -v {}:/output {} --rm {}".format(
        pwd, ssh_binding, tempdir, env_args, docker_image
    )
    exit_code = subprocess.call(cmd, shell=True)
    if exit_code != 0:
        if env_args:
            # remove secrets
            # Rebuild the command with env values masked so the raised
            # message does not leak secret values -- only key names remain.
            cmd = "docker run -v {}:/build {} -v {}:/output {} --rm {}".format(
                pwd,
                ssh_binding,
                tempdir,
                " ".join(f"-e {ei.split('=')[0]}=..." for ei in environment),
                docker_image,
            )
        raise Exception(
            "'{}' submission failed: exited with code '{}'".format(cmd, exit_code)
        )
def submit_job_zip(zip_path, jobID):
    """Upload *zip_path* to the configured submit prefix as ``<jobID>.zip``."""
    destination_key = "{}/{}.zip".format(config.submit_prefix(), jobID)
    s3 = boto3.resource("s3")
    s3.meta.client.upload_file(zip_path, config.bucket(), destination_key)
def scheduled_at(jobID):
    """Return the last-modified time of the job's config.json (schedule time)."""
    return cs3.last_modified(config.bucket(), job_file_prefix(jobID, "config.json"))


def cancelled_at(jobID):
    """Return when the job was cancelled, or None if it is not cancelled."""
    if not is_job_cancelled(jobID):
        return None
    return cs3.last_modified(
        config.bucket(), job_file_prefix(jobID, "job_cancelled.zip")
    )
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Tool/cyglink.py | from SCons.Tool.linkCommon import StringizeLibSymlinks, EmitLibSymlinks
from SCons.Util import CLVar, is_String
from . import gnulink
def cyglink_lib_emitter(target, source, env, **kw):
    """Shared-library emitter for cygwin: appends the import library
    ($<prefix>_IMPLIBNAME) to *target* unless env['no_import_lib'] is set,
    and marks every target node's attributes as shared."""
    # NOTE(review): upstream SCons ships this emitter with verbose = False;
    # True makes every build print debug output -- confirm intent.
    verbose = True
    if 'variable_prefix' in kw:
        var_prefix = kw['variable_prefix']
    else:
        var_prefix = 'SHLIB'
    no_import_lib = env.get('no_import_lib', False)
    if verbose:
        print("cyglink_lib_emitter: target[0]={!r}".format(target[0].get_path()))
    if not no_import_lib:
        # Specify import lib and add to targets
        import_lib = env.subst('$%s_IMPLIBNAME' % var_prefix, target=target, source=source)
        import_lib_target = env.fs.File(import_lib)
        import_lib_target.attributes.shared = 1
        target.append(import_lib_target)
        if verbose:
            print("cyglink_lib_emitter: import_lib={}".format(import_lib))
            print("cyglink_lib_emitter: target=%s" % target)
    # Normalize string targets to File nodes and flag them all as shared.
    for tgt in target:
        if is_String(tgt):
            tgt = env.File(tgt)
        tgt.attributes.shared = 1
    return target, source
def cyglink_ldmodule_emitter(target, source, env, **kw):
    """Loadable-module variant of cyglink_lib_emitter (LDMODULE_* variables)."""
    return cyglink_lib_emitter(target, source, env, variable_prefix='LDMODULE')
def cyglink_shlib_symlink_emitter(target, source, env, **kw):
    """
    On cygwin, we only create a symlink from the non-versioned implib to the versioned implib.
    We don't version the shared library itself.
    :param target:
    :param source:
    :param env:
    :param kw:
    :return:
    """
    # NOTE(review): upstream SCons ships this with verbose = False; True
    # makes every build print debug output -- confirm intent.
    verbose = True
    if 'variable_prefix' in kw:
        var_prefix = kw['variable_prefix']
    else:
        var_prefix = 'SHLIB'
    no_import_lib = env.get('no_import_lib', False)
    if no_import_lib in ['1', 'True', 'true', True]:
        if verbose:
            print("cyglink_shlib_symlink_emitter: no_import_lib=%s" % no_import_lib)
        return target, source
    no_symlinks = env.subst('$%sNOVERSIONSYMLINKS' % var_prefix)
    if no_symlinks in ['1', 'True', 'true', True]:
        return target, source
    shlibversion = env.subst('$%sVERSION' % var_prefix)
    if shlibversion:
        if verbose:
            print("cyglink_shlib_symlink_emitter: %sVERSION=%s" % (var_prefix, shlibversion))
        # The implib (added by the cyglink_lib_emitter)
        imp_lib_node = target[1]
        shlib_noversion_symlink = env.subst('$%s_NOVERSION_SYMLINK' % var_prefix, target=target[0], source=source)
        if verbose:
            print("cyglink_shlib_symlink_emitter: shlib_noversion_symlink :%s" % shlib_noversion_symlink)
            print("cyglink_shlib_symlink_emitter: imp_lib_node :%s" % imp_lib_node)
        symlinks = [(env.File(shlib_noversion_symlink), imp_lib_node)]
        if verbose:
            print("cyglink_shlib_symlink_emitter: symlinks={!r}".format(
                ', '.join(["%r->%r" % (k, v) for k, v in StringizeLibSymlinks(symlinks)])
            ))
        if symlinks:
            # This does the actual symlinking
            EmitLibSymlinks(env, symlinks, target[0])
            # This saves the information so if the versioned shared library is installed
            # it can faithfully reproduce the correct symlinks
            target[0].attributes.shliblinks = symlinks
    return target, source
def cyglink_ldmod_symlink_emitter(target, source, env, **kw):
    """Loadable-module variant of cyglink_shlib_symlink_emitter."""
    return cyglink_shlib_symlink_emitter(target, source, env, variable_prefix='LDMODULE')
def cyglink_shlibversion(target, source, env, for_signature):
    """Return '.<version>' (dots replaced by dashes) from $SHLIBVERSION,
    or '' when no version is configured."""
    if 'SHLIBVERSION' not in env:
        return ''
    version = env.subst("$SHLIBVERSION", target=target, source=source)
    return "." + version.replace('.', '-')
def cyglink_ldmodule_version(target, source, env, for_signature):
    """Return '.<version>' (dots replaced by dashes) from $LDMODULEVERSION,
    or '' when no version is configured."""
    if 'LDMODULEVERSION' not in env:
        return ''
    version = env.subst("$LDMODULEVERSION", target=target, source=source)
    return "." + version.replace('.', '-')
def _implib_pre_flags(target, source, env, for_signature):
no_import_lib = env.get('no_import_lib', False)
if no_import_lib in ['1', 'True', 'true', True]:
return ''
else:
return '-Wl,--out-implib=${TARGETS[1]} -Wl,--export-all-symbols -Wl,--enable-auto-import -Wl,--whole-archive'
def _implib_post_flags(target, source, env, for_signature):
no_import_lib = env.get('no_import_lib', False)
if no_import_lib in ['1', 'True', 'true', True]:
return ''
else:
return '-Wl,--no-whole-archive'
def generate(env):
    """Add Builders and construction variables for cyglink to an Environment."""
    gnulink.generate(env)
    # NOTE(review): this *overwrites* LINKFLAGS rather than appending --
    # any flags set before the tool loads are lost. Confirm intent.
    env['LINKFLAGS'] = CLVar('-Wl,-no-undefined')
    # Cygwin naming: shared libs are cygfoo.dll, import libs libfoo.dll.a.
    env['SHLIBPREFIX'] = 'cyg'
    env['SHLIBSUFFIX'] = '.dll'
    env['IMPLIBPREFIX'] = 'lib'
    env['IMPLIBSUFFIX'] = '.dll.a'
    # Variables used by versioned shared libraries
    # SHLIBVERSIONFLAGS and LDMODULEVERSIONFLAGS are same as in gnulink...
    env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS'
    env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS'
    # Flags bracketing $SOURCES that drive import-library creation.
    env['_IMPLIB_PRE_SOURCES'] = _implib_pre_flags
    env['_IMPLIB_POST_SOURCES'] = _implib_post_flags
    env['SHLINKCOM'] = '$SHLINK -o $TARGET $SHLINKFLAGS $__SHLIBVERSIONFLAGS $__RPATH ' \
                       '$_IMPLIB_PRE_SOURCES $SOURCES $_IMPLIB_POST_SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LDMODULECOM'] = '$LDMODULE -o $TARGET $SHLINKFLAGS $__LDMODULEVERSIONFLAGS $__RPATH ' \
                         '$_IMPLIB_PRE_SOURCES $SOURCES $_IMPLIB_POST_SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    # Overwrite emitters. Cyglink does things differently when creating symlinks
    env['SHLIBEMITTER'] = [cyglink_lib_emitter, cyglink_shlib_symlink_emitter]
    env['LDMODULEEMITTER'] = [cyglink_ldmodule_emitter, cyglink_ldmod_symlink_emitter]
    # This is the non versioned shlib filename
    # If SHLIBVERSION is defined then this will symlink to $SHLIBNAME
    env['SHLIB_NOVERSION_SYMLINK'] = '${IMPLIBPREFIX}$_get_shlib_stem${IMPLIBSUFFIX}'
    env['LDMODULE_NOVERSION_SYMLINK'] = '${IMPLIBPREFIX}$_get_ldmodule_stem${IMPLIBSUFFIX}'
    env['SHLIB_IMPLIBNAME'] = '${IMPLIBPREFIX}$_get_shlib_stem${_SHLIB_IMPLIBSUFFIX}'
    env['LDMODULE_IMPLIBNAME'] = '${IMPLIBPREFIX}$_get_ldmodule_stem${_LDMODULE_IMPLIBSUFFIX}'
    # Version suffix generators: only the implib carries the version.
    env['_cyglink_shlibversion'] = cyglink_shlibversion
    env['_SHLIB_IMPLIBSUFFIX'] = '${_cyglink_shlibversion}${IMPLIBSUFFIX}'
    env['_SHLIBSUFFIX'] = '${_cyglink_shlibversion}${SHLIBSUFFIX}'
    env['_cyglink_ldmodule_version'] = cyglink_ldmodule_version
    env['_LDMODULESUFFIX'] = '${_cyglink_ldmodule_version}${LDMODULESUFFIX}'
    env['_LDMODULE_IMPLIBSUFFIX'] = '${_cyglink_ldmodule_version}${IMPLIBSUFFIX}'
    # Remove variables set by default initialization which aren't needed/used by cyglink
    # these variables were set by gnulink but are not used in cyglink
    for rv in ['_SHLIBSONAME', '_LDMODULESONAME']:
        if rv in env:
            del env[rv]
def exists(env):
    """Tool availability check: defer to the underlying gnulink tool."""
    return gnulink.exists(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Mopidy-OE1-1.2.0.tar.gz/Mopidy-OE1-1.2.0/mopidy_oe1/library.py | from __future__ import unicode_literals
import logging
import re
from client import OE1Client
from mopidy import backend
from mopidy.models import Ref, Track
logger = logging.getLogger(__name__)
class OE1Uris(object):
    """URI constants for the top-level OE1 library entries."""
    ROOT = 'oe1:directory'
    LIVE = 'oe1:live'
    CAMPUS = 'oe1:campus'
    ARCHIVE = 'oe1:archive'
class OE1LibraryProvider(backend.LibraryProvider):
    """Mopidy library provider exposing the OE1 live streams and the
    seven-day ("7 Tage") broadcast archive.

    Bug fix: ``except InvalidOE1Uri, e:`` is Python-2-only syntax and a
    SyntaxError on Python 3; ``except ... as e`` works on 2.6+ and 3.
    ``logger.warn`` is a deprecated alias of ``logger.warning``.
    """

    root_directory = Ref.directory(uri=OE1Uris.ROOT, name='OE1')
    root = [
        Ref.track(uri=OE1Uris.LIVE, name='Live'),
        Ref.track(uri=OE1Uris.CAMPUS, name='Campus'),
        Ref.directory(uri=OE1Uris.ARCHIVE, name='7 Tage')]

    def __init__(self, backend, client=OE1Client()):
        super(OE1LibraryProvider, self).__init__(backend)
        self.client = client

    def browse(self, uri):
        """Return child Refs of *uri*; empty list on invalid URIs."""
        try:
            library_uri = OE1LibraryUri.parse(uri)
        except InvalidOE1Uri as e:
            logger.error(e)
            return []
        if library_uri.uri_type == OE1UriType.ROOT:
            return self.root
        if library_uri.uri_type == OE1UriType.ARCHIVE:
            return self._browse_archive()
        if library_uri.uri_type == OE1UriType.ARCHIVE_DAY:
            return self._browse_day(library_uri.day_id)
        logger.warning('OE1LibraryProvider.browse called with uri '
                       'that does not support browsing: \'%s\'.' % uri)
        return []

    def _browse_archive(self):
        """Return one directory Ref per day available in the archive."""
        return [Ref.directory(uri=str(OE1LibraryUri(OE1UriType.ARCHIVE_DAY,
                                                    day['id'])),
                              name=day['label'])
                for day in self.client.get_days()]

    def _get_track_title(self, item):
        """Build a display title like '<time>: <title>' for an archive item."""
        return '%s: %s' % (item['time'], item['title'])

    def _browse_day(self, day_id):
        """Return one track Ref per broadcast item of the given day."""
        return [Ref.track(uri=str(OE1LibraryUri(OE1UriType.ARCHIVE_ITEM,
                                                day_id, item['id'])),
                          name=self._get_track_title(item))
                for item in self.client.get_day(day_id)['items']]

    def lookup(self, uri):
        """Resolve *uri* to a list of Tracks; empty list on invalid URIs."""
        try:
            library_uri = OE1LibraryUri.parse(uri)
        except InvalidOE1Uri as e:
            logger.error(e)
            return []
        if library_uri.uri_type == OE1UriType.LIVE:
            return [Track(uri=OE1Uris.LIVE, name='Live')]
        if library_uri.uri_type == OE1UriType.CAMPUS:
            return [Track(uri=OE1Uris.CAMPUS, name='Campus')]
        if library_uri.uri_type == OE1UriType.ARCHIVE_DAY:
            return self._browse_day(library_uri.day_id)
        if library_uri.uri_type == OE1UriType.ARCHIVE_ITEM:
            return self._lookup_item(library_uri.day_id, library_uri.item_id)
        logger.warning('OE1LibraryProvider.lookup called with uri '
                       'that does not support lookup: \'%s\'.' % uri)
        return []

    def _lookup_item(self, day_id, item_id):
        """Fetch a single archive item and wrap it in a Track."""
        item = self.client.get_item(day_id, item_id)
        return [Track(uri=str(OE1LibraryUri(OE1UriType.ARCHIVE_ITEM,
                                            day_id, item['id'])),
                      name=self._get_track_title(item))]

    def refresh(self, uri=None):
        """Drop any client-side caches so subsequent browses re-fetch."""
        self.client.refresh()
class OE1LibraryUri(object):
    """Parsed representation of an ``oe1:`` library URI."""

    def __init__(self, uri_type, day_id=None, item_id=None):
        self.uri_type = uri_type
        self.day_id = day_id
        self.item_id = item_id

    # Bug fix: the pattern used '\d' inside a plain string literal, an
    # invalid escape sequence (DeprecationWarning, and a SyntaxWarning /
    # future error on newer Python 3). Use a raw string for the regex part.
    archive_parse_expression = ('^' + re.escape(OE1Uris.ARCHIVE) +
                                r':(?P<day_id>\d{8})(:(?P<item_id>\d+))?$')
    archive_parser = re.compile(archive_parse_expression)

    @staticmethod
    def parse(uri):
        """Parse *uri* into an OE1LibraryUri; raise InvalidOE1Uri otherwise."""
        if uri == OE1Uris.ROOT:
            return OE1LibraryUri(OE1UriType.ROOT)
        if uri == OE1Uris.LIVE:
            return OE1LibraryUri(OE1UriType.LIVE)
        if uri == OE1Uris.CAMPUS:
            return OE1LibraryUri(OE1UriType.CAMPUS)
        if uri == OE1Uris.ARCHIVE:
            return OE1LibraryUri(OE1UriType.ARCHIVE)
        matches = OE1LibraryUri.archive_parser.match(uri)
        if matches is not None:
            day_id = matches.group('day_id')
            item_id = matches.group('item_id')
            if day_id is not None:
                if matches.group('item_id') is not None:
                    return OE1LibraryUri(OE1UriType.ARCHIVE_ITEM,
                                         day_id, item_id)
                return OE1LibraryUri(OE1UriType.ARCHIVE_DAY, day_id)
        raise InvalidOE1Uri(uri)

    def __str__(self):
        if self.uri_type == OE1UriType.ROOT:
            return OE1Uris.ROOT
        if self.uri_type == OE1UriType.LIVE:
            return OE1Uris.LIVE
        if self.uri_type == OE1UriType.CAMPUS:
            return OE1Uris.CAMPUS
        if self.uri_type == OE1UriType.ARCHIVE:
            return OE1Uris.ARCHIVE
        if self.uri_type == OE1UriType.ARCHIVE_DAY:
            return OE1Uris.ARCHIVE + ':' + self.day_id
        if self.uri_type == OE1UriType.ARCHIVE_ITEM:
            return OE1Uris.ARCHIVE + ':' + self.day_id + ':' + self.item_id
class InvalidOE1Uri(TypeError):
    """Raised when a string cannot be parsed as an OE1 library URI."""

    def __init__(self, uri):
        # Bug fix: the original called super(TypeError, self).__init__,
        # which starts the MRO lookup *after* TypeError; super() must be
        # given the class it is written in, i.e. InvalidOE1Uri.
        super(InvalidOE1Uri, self).__init__(
            'The URI is not a valid OE1LibraryUri: \'%s\'.' % uri)
class OE1UriType(object):
    """Enumeration of the kinds of URI handled by OE1LibraryUri."""
    ROOT = 0
    LIVE = 1
    CAMPUS = 2
    ARCHIVE = 3
    ARCHIVE_DAY = 4
    ARCHIVE_ITEM = 5
/Destipy-0.1.14.tar.gz/Destipy-0.1.14/destipy/endpoints/social.py | from destipy.utils.http_method import HTTPMethod
from destipy.utils.requester import Requester
class Social:
    """Social endpoints (Bungie.Net friends API wrapper)."""

    def __init__(self, requester, logger):
        self.requester: Requester = requester
        self.logger = logger
        self.SOCIAL_URL = "https://www.bungie.net/Platform/Social/"

    async def GetFriendList(self, token: dict) -> dict:
        """Returns your Bungie Friend list

        Args:
            token (dict): The token for authentication

        Returns:
            dict: The friend list
        """
        try:
            self.logger.info("Getting friend list for {}...".format(token["membership_id"]))
            url = self.SOCIAL_URL + "Friends/"
            return await self.requester.request(method=HTTPMethod.GET, url=url, access_token=token["access_token"])
        except Exception as ex:
            self.logger.exception(ex)

    async def GetFriendRequestList(self, token: dict) -> dict:
        """Returns your friend request queue.

        Args:
            token (dict): The token to use for authentication

        Returns:
            dict: The friend request queue
        """
        try:
            self.logger.info("Getting friend request list for {}...".format(token["membership_id"]))
            url = self.SOCIAL_URL + "Friends/Requests/"
            return await self.requester.request(method=HTTPMethod.GET, url=url, access_token=token["access_token"])
        except Exception as ex:
            self.logger.exception(ex)

    async def IssueFriendRequest(self, token: dict, membership_id: str) -> dict:
        """Requests a friend relationship with the target user.
        Any of the target user's linked membership ids are valid inputs.

        Args:
            token (dict): The token to use for authentication
            membership_id (str): The membership id of the user you wish to add.

        Returns:
            dict: Whether or not the friend request was issued.
        """
        try:
            self.logger.info("Issuing friend request to {} for {}...".format(membership_id, token["membership_id"]))
            url = self.SOCIAL_URL + "Friends/Add/{}/".format(membership_id)
            return await self.requester.request(method=HTTPMethod.POST, url=url, access_token=token["access_token"], data={})
        except Exception as ex:
            self.logger.exception(ex)

    async def AcceptFriendRequest(self, token: dict, membership_id: str) -> dict:
        """Accepts a friend relationship with the target user.
        The user must be on your incoming friend request list, though no error will occur if they are not.

        Args:
            token (dict): The token to use for authentication
            membership_id (str): The membership id of the user you wish to accept.

        Returns:
            dict: Whether or not the friend request was accepted.
        """
        try:
            self.logger.info("Accepting friend request from {} for {}...".format(membership_id, token["membership_id"]))
            url = self.SOCIAL_URL + "Friends/Requests/Accept/{}/".format(membership_id)
            return await self.requester.request(method=HTTPMethod.POST, url=url, access_token=token["access_token"], data={})
        except Exception as ex:
            self.logger.exception(ex)

    async def DeclineFriendRequest(self, token: dict, membership_id: str) -> dict:
        """Declines a friend relationship with the target user.
        The user must be on your incoming friend request list, though no error will occur if they are not.

        Args:
            token (dict): The token to use for authentication
            membership_id (str): The membership id of the user you wish to decline.

        Returns:
            dict: Whether or not the friend request was declined.
        """
        try:
            self.logger.info("Declining friend request from {} for {}...".format(membership_id, token["membership_id"]))
            # Bug fix: this previously posted to 'Friends/Add/{id}/'
            # (copy-paste from IssueFriendRequest), which would SEND a
            # friend request instead of declining one. The Bungie.Net
            # endpoint for declining is 'Friends/Requests/Decline/{id}/'.
            url = self.SOCIAL_URL + "Friends/Requests/Decline/{}/".format(membership_id)
            return await self.requester.request(method=HTTPMethod.POST, url=url, access_token=token["access_token"], data={})
        except Exception as ex:
            self.logger.exception(ex)

    async def RemoveFriend(self, token: dict, membership_id: str) -> dict:
        """Remove a friend relationship with the target user.
        The user must be on your friend list, though no error will occur if they are not.

        Args:
            token (dict): The token to use for authentication
            membership_id (str): The membership id of the user you wish to remove.

        Returns:
            dict: Whether or not the friend was removed.
        """
        try:
            self.logger.info("Removing friend {} for {}...".format(membership_id, token["membership_id"]))
            url = self.SOCIAL_URL + "Friends/Remove/{}/".format(membership_id)
            return await self.requester.request(method=HTTPMethod.POST, url=url, access_token=token["access_token"], data={})
        except Exception as ex:
            self.logger.exception(ex)

    async def RemoveFriendRequest(self, token: dict, membership_id: str) -> dict:
        """Remove a friend relationship with the target user. The user must be on your outgoing request friend list, though no error will occur if they are not.

        Args:
            token (dict): The token to use for authentication
            membership_id (str): The membership id of the user you wish to remove.

        Returns:
            dict: Whether or not the friend request was removed.
        """
        try:
            self.logger.info("Removing friend request from {} for {}...".format(membership_id, token["membership_id"]))
            url = self.SOCIAL_URL + "Friends/Requests/Remove/{}/".format(membership_id)
            return await self.requester.request(method=HTTPMethod.POST, url=url, access_token=token["access_token"], data={})
        except Exception as ex:
            self.logger.exception(ex)

    async def GetPlatformFriendList(self, friend_platform: int, page: str) -> dict:
        """Gets the platform friend of the requested type, with additional information if they have Bungie accounts. Must have a recent login session with said platform.

        Args:
            friend_platform (int): The platform friend type.
            page (str): The zero based page to return. Page size is 100

        Returns:
            dict: The platform friend list
        """
        try:
            self.logger.info("Getting platform friend list for {}...".format(friend_platform))
            url = self.SOCIAL_URL + "PlatformFriends/{}/{}/".format(friend_platform, page)
            return await self.requester.request(method=HTTPMethod.GET, url=url)
        except Exception as ex:
            self.logger.exception(ex)
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_ro-ro.js | 'use strict';
// Auto-generated AngularJS locale bundle for Romanian (ro-ro):
// date/time names, number formats and the CLDR plural rule.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};

// Number of digits after the decimal point in the string form of n.
function getDecimals(n) {
  n = n + '';
  var i = n.indexOf('.');
  return (i == -1) ? 0 : n.length - i - 1;
}

// CLDR plural operands: v = count of visible fraction digits (capped at 3),
// f = visible fraction digits as an integer.
function getVF(n, opt_precision) {
  var v = opt_precision;

  if (undefined === v) {
    v = Math.min(getDecimals(n), 3);
  }

  var base = Math.pow(10, v);
  var f = ((n * base) | 0) % base;
  return {v: v, f: f};
}

$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "a.m.",
      "p.m."
    ],
    "DAY": [
      "duminic\u0103",
      "luni",
      "mar\u021bi",
      "miercuri",
      "joi",
      "vineri",
      "s\u00e2mb\u0103t\u0103"
    ],
    "MONTH": [
      "ianuarie",
      "februarie",
      "martie",
      "aprilie",
      "mai",
      "iunie",
      "iulie",
      "august",
      "septembrie",
      "octombrie",
      "noiembrie",
      "decembrie"
    ],
    "SHORTDAY": [
      "Dum",
      "Lun",
      "Mar",
      "Mie",
      "Joi",
      "Vin",
      "S\u00e2m"
    ],
    "SHORTMONTH": [
      "ian.",
      "feb.",
      "mar.",
      "apr.",
      "mai",
      "iun.",
      "iul.",
      "aug.",
      "sept.",
      "oct.",
      "nov.",
      "dec."
    ],
    "fullDate": "EEEE, d MMMM y",
    "longDate": "d MMMM y",
    "medium": "d MMM y HH:mm:ss",
    "mediumDate": "d MMM y",
    "mediumTime": "HH:mm:ss",
    "short": "dd.MM.y HH:mm",
    "shortDate": "dd.MM.y",
    "shortTime": "HH:mm"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "RON",
    "DECIMAL_SEP": ",",
    "GROUP_SEP": ".",
    // PATTERNS[0] is the decimal pattern, PATTERNS[1] the currency pattern.
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "\u00a0\u00a4",
        "posPre": "",
        "posSuf": "\u00a0\u00a4"
      }
    ]
  },
  "id": "ro-ro",
  // Romanian plural rule: ONE for exactly 1 (no fraction digits); FEW for
  // fractions, 0, and n % 100 in 1..19 (n != 1); otherwise OTHER.
  "pluralCat": function(n, opt_precision) {  var i = n | 0;  var vf = getVF(n, opt_precision);  if (i == 1 && vf.v == 0) {    return PLURAL_CATEGORY.ONE;  }  if (vf.v != 0 || n == 0 || n != 1 && n % 100 >= 1 && n % 100 <= 19) {    return PLURAL_CATEGORY.FEW;  }  return PLURAL_CATEGORY.OTHER;}
});
}]);
/OLCTools-1.3.5.tar.gz/OLCTools-1.3.5/olctools/accessoryFunctions/sra_confirmation.py | from olctools.accessoryFunctions.accessoryFunctions import SetupLogging
from argparse import ArgumentParser
import logging
import pandas
import os
__author__ = 'adamkoziol'
class Check(object):
def main(self):
self.load_biosample_table()
self.load_curation_table()
self.load_sra_table()
self.compare_tables()
def load_biosample_table(self):
"""
Use pandas to load the Biosample Excel file. Extract the values for the accession, Title, and OrganismName
columns
"""
logging.info('Loading Biosample table')
# Use pandas to create a nested dictionary for the biosample table
biosample_dictionary = self.load_table(self.biosample_table)
# Iterate through the dictionary, and extract values for the 'accession', 'Title', and 'OrganismName' columns
for primary_key, header in biosample_dictionary.items():
# Add the accession, genus and title values to the dictionary
self.biosample_title_dict[primary_key] = {
'title': header['Title'],
'accession': header['accession'],
'organism': header['OrganismName']
}
def load_curation_table(self):
"""
Parse the Access database outputs with pandas, and create a dictionary containing the values for the required
keys
"""
logging.info('Loading Access database curation table')
# Use pandas to create a nested dictionary for the curation table
curation_dictionary = self.load_table(self.curation_table)
# Iterate through the dictionary, and add the required key: value pairs to the dictionary
for primary_key, header in curation_dictionary.items():
self.curation_title_dict[primary_key] = {
'title': header['CFIAID'],
'seqid': header['SEQID'],
'genus': header['Genus'],
'species': header['Species'],
'mash': str(header['MASH_ReferenceGenome']),
'curator_flag': header['CuratorFlag']
}
def load_sra_table(self):
"""
Parse the SRA metadata table with pandas, and create a dictionary containing the values for the required keys
"""
logging.info('Loading SRA metadata table')
# Use pandas to create a nested dictionary for the SRA metadata table
sra_dictionary = self.load_table(self.sra_table,
sheet_name='SRA_data')
# Iterate through the dictionary, and add the required key: value pairs to the dictionary
for primary_key, header in sra_dictionary.items():
# The 'title' column contains the CFIA ID joined to the organism e.g. CFIAFB20180146_L. monocytogenes
# split on the underscore
title, organism = header['title'].split('_')
self.sra_title_dict[primary_key] = {
'title': title,
'seqid': header['library_ID'],
'accession': header['biosample_accession'],
'organism': organism,
'forward': header['filename'],
'reverse': header['filename2']
}
def compare_tables(self):
"""
"""
logging.info('Finding errors and discrepancies between tables')
curation_fails = set()
curation_passes = set()
for sra_key, sra_dict in self.sra_title_dict.items():
# Extract the CFIA ID, SEQ ID and accession used in the SRA table
cfia_id = sra_dict['title']
seqid = sra_dict['seqid']
accession = sra_dict['accession']
# Check to see if the genus is the right one, and that it has been entered correctly
try:
assert self.organism_conversion_dict[self.genus] in sra_dict['organism']
except AssertionError:
logging.warning('SRA incorrect organism:', cfia_id, self.genus, sra_dict['organism'])
# Ensure that the file names are formatted properly
try:
assert sra_dict['seqid'] in sra_dict['forward'] and sra_dict['seqid'] in sra_dict['reverse']
except AssertionError:
logging.warning('Filename mismatch:', cfia_id, self.genus, sra_dict['seqid'], sra_dict['forward'],
sra_dict['reverse'])
# Compare the SRA table entries against the BioSample table
bio_sample_present = False
for biosample_key, biosample_dict in self.biosample_title_dict.items():
# Find the matching CFIA ID
if biosample_dict['title'] == cfia_id:
bio_sample_present = True
# Make sure the genus is correct
try:
assert self.genus in biosample_dict['organism']
except AssertionError:
logging.warning('Genus mismatch biosample table:', cfia_id, self.genus,
biosample_dict['organism'])
# Ensure that the accessions match
try:
assert accession == biosample_dict['accession']
except AssertionError:
logging.warning('Accession mismatch biosample table:', cfia_id, sra_dict['accession'],
biosample_dict['accession'])
# Indicate that the BioSample table is missing an entry
if not bio_sample_present:
logging.warning('No entry in the Biosample table:', cfia_id, sra_dict['seqid'], sra_dict['accession'])
# Compare the SRA table and the Access database curation table
curation_present = False
for curation_key, curation_dict in self.curation_title_dict.items():
curation_pass = True
# Find the matching CFIA ID in the curation table. Note that the table can have multiple samples with
# the same ID
if curation_dict['title'] == cfia_id:
curation_present = True
# Check to see if there is an entry with a matching SEQ ID
try:
assert seqid == curation_dict['seqid']
except AssertionError:
curation_pass = False
# Ensure that the genus is correct
try:
assert self.genus in curation_dict['genus']
except AssertionError:
curation_pass = False
# Ensure that the MASH prediction is correct
try:
assert self.genus in curation_dict['mash']
except AssertionError:
curation_pass = False
# Confirm that the sample passes curation
try:
assert 'REFERENCE' in curation_dict['curator_flag'] or 'PASS' in curation_dict['curator_flag']
except AssertionError:
curation_pass = False
# Add the entry to the appropriate list
if not curation_pass:
curation_fails.add(cfia_id)
else:
curation_passes.add(cfia_id)
# Indicate that there was no entry in the curation table
if not curation_present:
logging.warning('No entry in the curation table: ', cfia_id, 'SEQ ID:', sra_dict['seqid'], 'Accession:',
sra_dict['accession'])
# Iterate through all the entries that
for cfia_id in sorted(curation_fails - curation_passes):
logging.warning('Curation fail:', cfia_id)
# Iterate through the SRA table to return the correct entry
for sra_key, sra_dict in self.sra_title_dict.items():
if sra_dict['title'] == cfia_id:
logging.warning('\tSRA entry: ', cfia_id, 'SEQ ID:', sra_dict['seqid'], 'Accession:',
sra_dict['accession'])
# Iterate through the Access curation table, and return the entry/entries corresponding to the CFIA ID
for curation_key, curation_dict in self.curation_title_dict.items():
if curation_dict['title'] == cfia_id:
logging.warning('\tCuration entry:', cfia_id, 'SEQ ID:', curation_dict['seqid'], 'Genus:',
curation_dict['genus'], 'MASH genus:', curation_dict['mash'],
'Curator flag:', curation_dict['curator_flag'])
@staticmethod
def load_table(table_name, sheet_name=None):
"""
Create nested dictionary from Excel file using pandas
:param: table_name: Name and path of Excel file to process
:param: sheet_name: Optional name of sheet in the Excel file to use. If not supplied, pandas will read the first
sheet
:return nested dictionary of primary key: header: value
"""
# A dictionary to store the parsed excel file in a more readable format
nesteddictionary = dict()
if sheet_name:
dictionary = pandas.read_excel(table_name, sheet_name=sheet_name).to_dict()
else:
dictionary = pandas.read_excel(table_name).to_dict()
for header in dictionary:
# primary_key is the primary key, and value is the value of the cell for that key + header combination
for primary_key, value in dictionary[header].items():
# Update the dictionary with the new data
try:
nesteddictionary[primary_key].update({header: value})
# Create the nested dictionary if it hasn't been created yet
except KeyError:
nesteddictionary[primary_key] = dict()
nesteddictionary[primary_key].update({header: value})
return nesteddictionary
def __init__(self, path, biosampletable, curationtable, sratable, genus):
    """
    Initialise the Check object: resolve the supplied folder path, build the
    full paths of the three input tables, and set up logging.

    :param path: Folder containing the required tables
    :param biosampletable: Name of the BioSample table from NCBI
    :param curationtable: Name of the curation table from the Access database
    :param sratable: Name of the SRA metadata table
    :param genus: Genus of the samples being uploaded
    """
    # os.path.expanduser is a no-op on paths that do not start with '~',
    # so a single call replaces the original startswith('~') branch
    self.path = os.path.abspath(os.path.expanduser(path))
    self.biosample_table = os.path.join(self.path, biosampletable)
    self.curation_table = os.path.join(self.path, curationtable)
    self.sra_table = os.path.join(self.path, sratable)
    self.genus = genus
    # Nested dictionaries of parsed table contents, populated later
    self.biosample_title_dict = dict()
    self.curation_title_dict = dict()
    self.sra_title_dict = dict()
    # Map genus names to abbreviated organism names
    # (fixed typo: was 'Camplylobacter', which could never match the
    # 'Campylobacter' spelling offered by the CLI --genus choices)
    self.organism_conversion_dict = {
        'Campylobacter': 'C. jejuni',
        'Escherichia': 'E. coli',
        'Listeria': 'L. monocytogenes',
        'Salmonella': 'S. enterica',
    }
    SetupLogging(filehandle=os.path.join(self.path, 'log.txt'),
                 log_level=logging.WARN)
def cli():
    """Parse the command line arguments, then build and run a Check object."""
    arg_parser = ArgumentParser(description='Confirm that SRA metadata table has correct biosample accessions, SEQIDs, '
                                            'only contains the genus of interest, and passes curation')
    # All four table/path arguments are mandatory
    arg_parser.add_argument('-p', '--path',
                            required=True,
                            help='Path to folder containing necessary tables')
    arg_parser.add_argument('-b', '--biosampletable',
                            required=True,
                            help='Name of Biosample table from NCBI (must be in the supplied path)')
    arg_parser.add_argument('-c', '--curationtable',
                            required=True,
                            help='Name of Curation table from Access database(must be in the supplied path)')
    arg_parser.add_argument('-s', '--sratable',
                            required=True,
                            help='Name of SRA metadata table (must be in the supplied path)')
    arg_parser.add_argument('-g', '--genus',
                            choices=['Campylobacter', 'Escherichia', 'Listeria', 'Salmonella'],
                            required=True,
                            help='Genus of samples being uploaded')
    args = arg_parser.parse_args()
    # Build the checker from the parsed arguments and run the validation
    checker = Check(path=args.path,
                    biosampletable=args.biosampletable,
                    curationtable=args.curationtable,
                    sratable=args.sratable,
                    genus=args.genus)
    checker.main()
# Script entry point: run the command line interface
if __name__ == '__main__':
    cli()
/MDS_big_advantrue-0.1.tar.gz/MDS_big_advantrue-0.1/MDS_Big_Adventure/item/sparkLance.py | class sparkLance():
#initialize
def __init__(self, name, hp, po, status):
self.__name = name
self.__hp = hp
self.__po = po
self.__status = status
#multiply the health value to the player
def betaTransform(self, p):
try:
p.health = p.health*self.__hp
except Exception as ex:
print(ex)
print("Something is wrong with the player")
#multiply the power value to the player
def getSpacium(self, p):
try:
p.power = p.power*self.__po
except Exception as ex:
print(ex)
print("Something is wrong with the player")
#get the plot when sparklance is activated
def getplot(self):
    """Return the two story passages for the Spark Lance.

    Index 0 is the transformation scene; index 1 is the scene where the
    Tim Hortons staff hands the artifact over. The text is returned
    verbatim (including its original spelling) for the game to print.
    """
    return ["""
As soon as you touched the Spark Lance, it started glowing and immersed you in light.
You were startled at first, but soon you felt something, something strange but familiar.
It felt like those sunny afternoons, when you look out from the window,
everything is so quite under the clear blue sky, you feel peaceful.
It then felt like the times when you finished the very last exam of the term, you are relieved, and hope
is coming back to you agian.
After that, you felt courage, it was like recieving an quiz score better than you had hoped, you started to believe
in yourself.
Suddenly, You heard a voice in the light:
"The only way to save the world, is you become the light"
Yes, I can be the light! You thought.
You raised the Spark Lance high:
Ultraman !!!!!!!!!!
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Exciting BGM playing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*********************************************Fancy visual effects *************************************************
You transformed into Ultraman with the power of the light, you now have special powers to save the world.
""",
"""
"It is you..." The Tim Hortons staff said slowly.
"Many years ago an old lady came here and ordered turkey bacon club,
I told her we only have breakfast at this branch, but she came back again
the following day and ordered the same thing. After I told her the
that we really don't sell turkey bacon here, she smiled and gave something to me, wait here please."
The staff hurried into the storage room. He came back with a strange looking artifact in his hand.
Strangly, your attantion is captured by it, as if it is speaking to you.
"This is Spark Lance," said the staff, "the old lady said if one day this world is
in trouble, someone would come here to order turkey bacon club, I should tell that
person the same thing I told her at the first time. If that person came back again,
I should give this to them."
The Tim Horton's staff handed the artifact to you"""]
#add status
def check(self):
self.__status += 1
#return status
def getstatus(self):
return self.__status | PypiClean |
/Adytum-PyMonitor-1.0.5.tar.bz2/Adytum-PyMonitor-1.0.5/lib/math/base.py | NOTATION10 = '0123456789'
# Digit alphabets: a number rendered in base N uses the first N characters
# of the matching NOTATION string as its digits (digit value = string index).
NOTATION36 = '0123456789abcdefghijklmnopqrstuvwxyz'
NOTATION65 = '-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz'
NOTATION68 = '$,-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~'
NOTATION69 = '%+-.0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~'
NOTATION70 = "!'()*-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~"
NOTATION90 = "!'#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_abcdefghijklmnopqrstuvwxyz{|}"
# Bases for which a BaseN class is generated at the bottom of this module
legal_bases = [10, 36, 65, 68, 69, 70, 90]
class BaseConvert(object):
    '''
    "Base" class for base conversions ;-)

    Generated subclasses (Base10, Base36, ...) set ``base``, ``notation``
    and ``notation_map``; ``convert`` then renders a base-10 integer as a
    string of digits taken from ``notation``.
    '''
    __metaclass__ = type
    base = None
    notation = None
    notation_map = None
    # Numeric types accepted by _convert; ``long`` only exists on Python 2,
    # so fall back to (int, float) where it is undefined.
    try:
        _NUMBER_TYPES = (int, long, float)
    except NameError:
        _NUMBER_TYPES = (int, float)

    def _convert(self, n):
        '''
        Private helper: return the base-``self.base`` digits of ``n`` as a
        list of remainders, least significant digit first.
        '''
        # Fixed: the original tested isinstance(1, x), which always passed
        # and made the type check a no-op.
        if not isinstance(n, self._NUMBER_TYPES):
            raise TypeError('parameters must be numbers')
        converted = []
        quotient, remainder = divmod(n, self.base)
        converted.append(remainder)
        if quotient != 0:
            converted.extend(self._convert(quotient))
        return converted

    def convert(self, n, tobase=None, frombase=10):
        '''
        Convert ``n`` (interpreted as a base-10 integer) to its string
        representation in ``tobase`` (defaults to ``self.base``).

        NOTE(review): ``frombase`` is currently unused -- the input is always
        interpreted as base 10, and the conversion itself always uses
        ``self.base``; confirm before relying on either keyword.
        '''
        try:
            n = int(n)
        except (TypeError, ValueError):
            # Fixed: the original raised a bare string, which is itself a
            # TypeError on any modern interpreter.
            raise TypeError("The first parameter of 'convert' needs to be an integer!")
        if not tobase:
            tobase = self.base
        converted = [self.notation[x] for x in self._convert(n)]
        converted.reverse()
        return ''.join(converted)

    def getNotation(self, list_of_remainders):
        '''
        Get the notational representation of the converted number
        '''
        return ''.join([self.notation[x] for x in list_of_remainders])
# Doctest template; '%s' is filled in with each base number below and the
# result is attached to the generated BaseN class as its docstring.
# NOTE(review): these doctests call convert() with keyword arguments
# ``newbase``/``oldbase``, but convert() is defined with ``tobase``/``frombase``,
# and no expected-output lines are given -- the doctests look out of date;
# confirm before relying on _test().
doc_template = '''
>>> b = Base%s()
>>> zero = b.convert(0)
>>> ten = b.convert(10)
>>> hund = b.convert(100)
>>> thou = b.convert(1000)
>>> mil = b.convert(1000000)
>>> bil = b.convert(100000000)
>>> goog = b.convert(10**10)
>>> print (zero, ten, hund, thou, mil, bil, goog)
>>> zero = b.convert(zero, newbase=10, oldbase=b.base)
>>> ten = b.convert(ten, newbase=10, oldbase=b.base)
>>> hund = b.convert(hund, newbase=10, oldbase=b.base)
>>> thou = b.convert(thou, newbase=10, oldbase=b.base)
>>> mil = b.convert(mil, newbase=10, oldbase=b.base)
>>> bil = b.convert(bil, newbase=10, oldbase=b.base)
>>> goog = b.convert(goog, newbase=10, oldbase=b.base)
>>> print (zero, ten, hund, thou, mil, bil, goog)
'''
# build classes for whatever notations exist
# NOTE(review): the ``new`` module is Python 2 only (removed in Python 3);
# porting would require type() or types.new_class instead of new.classobj.
import new
for base in [ str(x) for x in legal_bases ]:
    # Shared implementation that every generated BaseN class derives from
    base_klass = globals()['BaseConvert']
    klass_name = 'Base'+base
    # Digit alphabet and its reverse (character -> digit value) lookup
    notation = eval('NOTATION'+base)
    notation_map = dict([ (y, x) for x, y in enumerate(notation) ])
    #klass = type(klass_name, (base_klass,), {})
    # {'__metaclass__':type(base_klass())})
    klass = new.classobj(klass_name, (base_klass,), {'__doc__':doc_template%base})
    klass.base = int(base)
    klass.notation = notation
    klass.notation_map = notation_map
    # Expose the generated class (Base10, Base36, ...) at module level
    globals()[klass_name] = klass
def _test():
    """Run this module's doctests."""
    import doctest
    import base
    doctest.testmod(base)
# Script entry point: run the module's doctests
if __name__ == '__main__':
    _test()
/CustomShellCreator-0.0.5.tar.gz/CustomShellCreator-0.0.5/README.md | # CustomShellCreator
## About
CustomShellCreator is a module that allows you to create a custom shell
### Author
The author is [chinmaym505](https://github.com/chinmaym505)
### License
CustomShellCreator has been released under the Apache Software License
## How to use this module
```
import CustomShellCreator.shell as shell
myShell = shell.shell({command (type: string): [has arguments? (1 for yes, 0 for no), variable to store argument (type: string, keep as empty string if no argument), code to run for command (type: string)], (other entries if any)})
myShell.run()
```
### Example
```
import CustomShellCreator.shell as shell
myShell = shell.shell({"sayHi":[1,"name","""print(f"Hello there, {name}!")"""],"sayMeow":[0,"","""print("meow!")"""]})
myShell.run()
``` | PypiClean |
/MovieRecEngine-0.1.1.tar.gz/MovieRecEngine-0.1.1/README.md | # MovieRecEngine
MovieRecEngine is an abbreviation of Movie Recommendation Engine. It is a simple collaborative-filtering-based library that uses a PyTorch sequential neural network to make building your movie recommendation system easy.
*This library is currently at a very early stage, so there may be significant changes.*
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install MovieRecEngine.
```bash
pip install MovieRecEngine
```
## Description
MovieRecEngine uses collaborative filtering to find similarities between users and items simultaneously to provide recommendations. This allows for serendipitous recommendations; that is, collaborative filtering models can recommend an item to user A based on the interests of a similar user B. Furthermore, the embeddings can be learned automatically, without relying on hand-engineering of features.
MovieRecEngine uses PyTorch sequential neural networks to train a model that can predict a user's rating for an unseen movie based on the past interests/ratings they have provided.
MovieRecEngine uses [tez](https://pypi.org/project/tez/), a simple PyTorch trainer that supports CPU and GPU training.
## How to use MovieRecEngine
* To train a model using MovieRecEngine, define a Dataset that contains columns "userId", "movieId", "ratings". Example [Train sample](https://github.com/MrR0b0t-23/MovieRecEngine/blob/main/Examples/Train_Sample.csv)
* Create a object for ```Train ``` class in MovieRecEngine library with parameters trainDatasetPath, userLabelEncoderPath, movieLabelEncoderPath, validDatasetSize, trainBatchSize, validBatchSize, device, nEpochs, trainedModelPath, randomState.
* Train the model by calling ```train``` function in ```Train``` class.
* To predict user movie ratings using MovieRecEngine, define a Dataset that contains columns "userId", "movieId", "ratings". Example [Predict sample](https://github.com/MrR0b0t-23/MovieRecEngine/blob/main/Examples/Predict_Sample.csv)
*NOTE: "userId" needs to contain 1 unique userId.*
* Create a object for ```Predict ``` class in MovieRecEngine library with parameters datasetPath, userLabelEncoderPath, movieLabelEncoderPath, trainedModelPath, predictBatchSize, device.
* Predict user movie ratings by calling ```predict``` function in ```Predict ``` class.
## Parameters
1. ```Train``` class:
- trainDatasetPath ==> Path for your training Dataset.
- userLabelEncoderPath ==> Path in which you want to save user Label Encoder (this will be used in your prediction)
- movieLabelEncoderPath ==> Path in which you want to save movie Label Encoder (this will be used your prediction)
- validDatasetSize ==> Test size for train_test_split
- trainBatchSize ==> The number of train samples to work through before updating the internal model parameters.
- validBatchSize ==> The number of test samples to work through before updating the internal model parameters.
- device ==> Device in which you want to train your model 'cuda' or 'cpu'. Default 'cpu'.
- nEpochs ==> The number times that the learning algorithm will work through the entire training dataset.
- trainedModelPath ==> Path to save your trained model (this will be used in your prediction)
- randomState ==> Random State values for train_test_split
2. ```Predict``` class:
- datasetPath ==> Path for your prediction Dataset.
- userLabelEncoderPath ==> Path in which you saved user Label Encoder (while training)
- movieLabelEncoderPath ==> Path in which you saved movie Label Encoder (while training)
- trainedModelPath ==> Path in which you saved Trained model (while training)
	- predictBatchSize ==> The number of prediction samples to work through in each batch.
- device ==> Device in which you want to train your model 'cuda' or 'cpu'. Default 'cpu'.
## Contributing
Currently, we are not accepting any pull requests! All PRs will be closed. If you want a feature or something doesn't work, please create an [issue](https://github.com/MrR0b0t-23/MovieRecEngine/issues).
| PypiClean |
/OctoBot-Backtesting-1.9.1.tar.gz/OctoBot-Backtesting-1.9.1/octobot_backtesting/channels_manager.py | import asyncio
import copy
import async_channel.channels as channels
import async_channel.enums as channel_enums
import octobot_commons.channels_name as channels_name
import octobot_commons.list_util as list_util
import octobot_commons.logging as logging
import octobot_commons.asyncio_tools as asyncio_tools
class ChannelsManager:
    """Drives backtesting iterations by draining every channel producer's
    consumer queues, one consumer priority level at a time."""

    # Maximum time (seconds) allowed when draining a producer's queues
    DEFAULT_REFRESH_TIMEOUT = 15

    def __init__(self, exchange_ids, matrix_id, refresh_timeout=DEFAULT_REFRESH_TIMEOUT):
        self.logger = logging.get_logger(self.__class__.__name__)
        self.exchange_ids = exchange_ids
        self.matrix_id = matrix_id
        self.refresh_timeout = refresh_timeout
        # Producers currently taken into account when refreshing
        self.producers = []
        # Producers as collected at initialization time, before any pruning
        self.initial_producers = []
        self.iteration_task = None
        self.should_stop = False
        # Maps a priority level value to the producers refreshed at that level
        self.producers_by_priority_levels = {}

    async def initialize(self) -> None:
        """
        Initialize Backtesting channels manager
        """
        self.logger.debug("Initializing producers...")
        try:
            # Gather every producer from backtesting, trading and evaluator channels
            self.initial_producers = list_util.flatten_list(_get_backtesting_producers() +
                                                            self._get_trading_producers() +
                                                            self._get_evaluator_producers())
            self.producers = copy.copy(self.initial_producers)
            # Initially every priority level refreshes the full producer list
            self.producers_by_priority_levels = {
                priority_level.value: self.producers
                for priority_level in channel_enums.ChannelConsumerPriorityLevels
            }
            # Initialize all producers by calling producer.start()
            for producer in list_util.flatten_list(self._get_trading_producers() + self._get_evaluator_producers()):
                await producer.start()
        except Exception as exception:
            self.logger.exception(exception, True, f"Error when initializing backtesting: {exception}")
            raise

    def clear_empty_channels_producers(self):
        # Keep only producers whose channel still has at least one consumer
        self.producers = [
            producer
            for producer in self.initial_producers
            if producer.channel.get_consumers()
        ]

    def update_producers_by_priority_levels(self):
        # Rebuild the per-level mapping, keeping only the levels that actually
        # have at least one prioritized consumer
        self.producers_by_priority_levels = {
            priority_level.value: _get_producers_with_priority_level_consumers(self.producers, priority_level.value)
            for priority_level in channel_enums.ChannelConsumerPriorityLevels
            if _check_producers_has_priority_consumers(self.producers, priority_level.value)
        }

    async def handle_new_iteration(self, current_timestamp) -> None:
        # Called for each backtesting timestamp: drain every priority level in order
        for level_key, producers in self.producers_by_priority_levels.items():
            try:
                if _check_producers_consumers_emptiness(producers, level_key):
                    # avoid creating tasks when not necessary
                    continue
                self.iteration_task = self.refresh_priority_level(producers, level_key, True)
                await self.iteration_task
                # trigger waiting events
                await asyncio_tools.wait_asyncio_next_cycle()
                # massive slow down
                # self.iteration_task = await asyncio.wait_for(self.refresh_priority_level(level_key.value, True),
                # timeout=self.refresh_timeout)
            except asyncio.TimeoutError:
                self.logger.error(f"Refreshing priority level {level_key} has been timed out at timestamp "
                                  f"{current_timestamp}.")

    async def refresh_priority_level(self, producers, priority_level: int, join_consumers: bool) -> None:
        # Keep draining until every producer queue at this level is empty:
        # processing a queue can enqueue new work on other producers
        while not self.should_stop:
            for producer in producers:
                await producer.synchronized_perform_consumers_queue(priority_level, join_consumers, self.refresh_timeout)
            if _check_producers_consumers_emptiness(self.producers, priority_level):
                break

    def stop(self):
        # Ask any running refresh_priority_level loop to exit
        self.should_stop = True

    def flush(self):
        # Drop every producer reference (used when tearing the manager down)
        self.producers = []
        self.initial_producers = []
        self.producers_by_priority_levels = {}
        self.iteration_task = None

    def _get_trading_producers(self):
        # NOTE(review): imported here rather than at module level -- presumably
        # to avoid a circular import; confirm
        import octobot_trading.exchange_channel as exchange_channel
        return [
            _get_channel_producers(exchange_channel.get_chan(channel_name.value, exchange_id))
            for exchange_id in self.exchange_ids
            for channel_name in channels_name.OctoBotTradingChannelsName
        ]

    def _get_evaluator_producers(self):
        # NOTE(review): imported here rather than at module level -- presumably
        # to avoid a circular import; confirm
        import octobot_evaluators.evaluators.channel as evaluators_channel
        return [
            _get_channel_producers(evaluators_channel.get_chan(channel_name.value, self.matrix_id))
            for channel_name in channels_name.OctoBotEvaluatorsChannelsName
        ]
def _get_channel_producers(channel):
if channel.producers:
return channel.producers
return [channel.get_internal_producer()]
def _get_backtesting_producers():
    """Collect the producer lists of every OctoBot backtesting channel."""
    return [
        _get_channel_producers(channels.get_chan(name.value))
        for name in channels_name.OctoBotBacktestingChannelsName
    ]
def _check_producers_consumers_emptiness(producers, priority_level):
for producer in producers:
if not producer.is_consumers_queue_empty(priority_level):
return False
return True
def _check_producers_has_priority_consumers(producers, priority_level):
for producer in producers:
if producer.channel.get_prioritized_consumers(priority_level):
return True
return False
def _get_producers_with_priority_level_consumers(producers, priority_level):
return [
producer
for producer in producers
if producer.channel.get_prioritized_consumers(priority_level)
] | PypiClean |
/Aggrescan3D-1.0.2.tar.gz/Aggrescan3D-1.0.2/a3d_gui/static/js/hyp.js | var Hyphenator=(function(window){'use strict';var contextWindow=window,supportedLangs=(function(){var r={},o=function(code,file,script,prompt){r[code]={'file':file,'script':script,'prompt':prompt};};o('be','be.js',1,'Мова гэтага сайта не можа быць вызначаны аўтаматычна. Калі ласка пакажыце мову:');o('ca','ca.js',0,'');o('cs','cs.js',0,'Jazyk této internetové stránky nebyl automaticky rozpoznán. Určete prosím její jazyk:');o('da','da.js',0,'Denne websides sprog kunne ikke bestemmes. Angiv venligst sprog:');o('bn','bn.js',4,'');o('de','de.js',0,'Die Sprache dieser Webseite konnte nicht automatisch bestimmt werden. Bitte Sprache angeben:');o('el','el-monoton.js',6,'');o('el-monoton','el-monoton.js',6,'');o('el-polyton','el-polyton.js',6,'');o('en','en-us.js',0,'The language of this website could not be determined automatically. Please indicate the main language:');o('en-gb','en-gb.js',0,'The language of this website could not be determined automatically. Please indicate the main language:');o('en-us','en-us.js',0,'The language of this website could not be determined automatically. Please indicate the main language:');o('eo','eo.js',0,'La lingvo de ĉi tiu retpaĝo ne rekoneblas aŭtomate. Bonvolu indiki ĝian ĉeflingvon:');o('es','es.js',0,'El idioma del sitio no pudo determinarse autom%E1ticamente. Por favor, indique el idioma principal:');o('et','et.js',0,'Veebilehe keele tuvastamine ebaõnnestus, palun valige kasutatud keel:');o('fi','fi.js',0,'Sivun kielt%E4 ei tunnistettu automaattisesti. M%E4%E4rit%E4 sivun p%E4%E4kieli:');o('fr','fr.js',0,'La langue de ce site n%u2019a pas pu %EAtre d%E9termin%E9e automatiquement. Veuillez indiquer une langue, s.v.p.%A0:');o('grc','grc.js',6,'');o('gu','gu.js',7,'');o('hi','hi.js',5,'');o('hu','hu.js',0,'A weboldal nyelvét nem sikerült automatikusan megállapítani. 
Kérem adja meg a nyelvet:');o('hy','hy.js',3,'Չհաջողվեց հայտնաբերել այս կայքի լեզուն։ Խնդրում ենք նշեք հիմնական լեզուն՝');o('it','it.js',0,'Lingua del sito sconosciuta. Indicare una lingua, per favore:');o('kn','kn.js',8,'ಜಾಲ ತಾಣದ ಭಾಷೆಯನ್ನು ನಿರ್ಧರಿಸಲು ಸಾಧ್ಯವಾಗುತ್ತಿಲ್ಲ. ದಯವಿಟ್ಟು ಮುಖ್ಯ ಭಾಷೆಯನ್ನು ಸೂಚಿಸಿ:');o('la','la.js',0,'');o('lt','lt.js',0,'Nepavyko automatiškai nustatyti šios svetainės kalbos. Prašome įvesti kalbą:');o('lv','lv.js',0,'Šīs lapas valodu nevarēja noteikt automātiski. Lūdzu norādiet pamata valodu:');o('ml','ml.js',10,'ഈ വെ%u0D2C%u0D4D%u200Cസൈറ്റിന്റെ ഭാഷ കണ്ടുപിടിയ്ക്കാ%u0D28%u0D4D%u200D കഴിഞ്ഞില്ല. ഭാഷ ഏതാണെന്നു തിരഞ്ഞെടുക്കുക:');o('nb','nb-no.js',0,'Nettstedets språk kunne ikke finnes automatisk. Vennligst oppgi språk:');o('no','nb-no.js',0,'Nettstedets språk kunne ikke finnes automatisk. Vennligst oppgi språk:');o('nb-no','nb-no.js',0,'Nettstedets språk kunne ikke finnes automatisk. Vennligst oppgi språk:');o('nl','nl.js',0,'De taal van deze website kan niet automatisch worden bepaald. Geef de hoofdtaal op:');o('or','or.js',11,'');o('pa','pa.js',13,'');o('pl','pl.js',0,'Języka tej strony nie można ustalić automatycznie. Proszę wskazać język:');o('pt','pt.js',0,'A língua deste site não pôde ser determinada automaticamente. Por favor indique a língua principal:');o('ru','ru.js',1,'Язык этого сайта не может быть определен автоматически. Пожалуйста укажите язык:');o('sk','sk.js',0,'');o('sl','sl.js',0,'Jezika te spletne strani ni bilo mogoče samodejno določiti. Prosim navedite jezik:');o('sr-cyrl','sr-cyrl.js',1,'Језик овог сајта није детектован аутоматски. Молим вас наведите језик:');o('sr-latn','sr-latn.js',0,'Jezika te spletne strani ni bilo mogoče samodejno določiti. Prosim navedite jezik:');o('sv','sv.js',0,'Spr%E5ket p%E5 den h%E4r webbplatsen kunde inte avg%F6ras automatiskt. V%E4nligen ange:');o('ta','ta.js',14,'');o('te','te.js',15,'');o('tr','tr.js',0,'Bu web sitesinin dili otomatik olarak tespit edilememiştir. 
Lütfen dökümanın dilini seçiniz%A0:');o('uk','uk.js',1,'Мова цього веб-сайту не може бути визначена автоматично. Будь ласка, вкажіть головну мову:');o('ro','ro.js',0,'Limba acestui sit nu a putut fi determinată automat. Alege limba principală:');return r;}()),basePath=(function(){var s=contextWindow.document.getElementsByTagName('script'),i=0,p,src,t=s[i],r='';while(!!t){if(!!t.src){src=t.src;p=src.indexOf('Hyphenator.js');if(p!==-1){r=src.substring(0,p);}}i+=1;t=s[i];}return!!r?r:'//hyphenator.googlecode.com/svn/trunk/';}()),isLocal=(function(){var re=false;if(window.location.href.indexOf(basePath)!==-1){re=true;}return re;}()),documentLoaded=false,persistentConfig=false,doFrames=false,dontHyphenate={'video':true,'audio':true,'script':true,'code':true,'pre':true,'img':true,'br':true,'samp':true,'kbd':true,'var':true,'abbr':true,'acronym':true,'sub':true,'sup':true,'button':true,'option':true,'label':true,'textarea':true,'input':true,'math':true,'svg':true,'style':true},enableCache=true,storageType='local',storage,enableReducedPatternSet=false,enableRemoteLoading=true,displayToggleBox=false,onError=function(e){window.alert("Hyphenator.js says:\n\nAn Error occurred:\n"+e.message);},onWarning=function(e){window.console.log(e.message);},createElem=function(tagname,context){context=context||contextWindow;var el;if(window.document.createElementNS){el=context.document.createElementNS('http://www.w3.org/1999/xhtml',tagname);}else if(window.document.createElement){el=context.document.createElement(tagname);}return el;},css3=false,css3_h9n,css3_gethsupport=function(){var s,createLangSupportChecker=function(prefix){var 
testStrings=['aabbccddeeffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz','абвгдеёжзийклмнопрстуфхцчшщъыьэюя','أبتثجحخدذرزسشصضطظعغفقكلمنهوي','աբգդեզէըթժիլխծկհձղճմյնշոչպջռսվտրցւփքօֆ','ঁংঃঅআইঈউঊঋঌএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহ়ঽািীুূৃৄেৈোৌ্ৎৗড়ঢ়য়ৠৡৢৣ','ँंःअआइईउऊऋऌएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्॒॑ॠॡॢॣ','αβγδεζηθικλμνξοπρσςτυφχψω','બહઅઆઇઈઉઊઋૠએઐઓઔાિીુૂૃૄૢૣેૈોૌકખગઘઙચછજઝઞટઠડઢણતથદધનપફસભમયરલળવશષ','ಂಃಅಆಇಈಉಊಋಌಎಏಐಒಓಔಕಖಗಘಙಚಛಜಝಞಟಠಡಢಣತಥದಧನಪಫಬಭಮಯರಱಲಳವಶಷಸಹಽಾಿೀುೂೃೄೆೇೈೊೋೌ್ೕೖೞೠೡ','ກຂຄງຈຊຍດຕຖທນບປຜຝພຟມຢຣລວສຫອຮະັາິີຶືຸູົຼເແໂໃໄ່້໊໋ໜໝ','ംഃഅആഇഈഉഊഋഌഎഏഐഒഓഔകഖഗഘങചഛജഝഞടഠഡഢണതഥദധനപഫബഭമയരറലളഴവശഷസഹാിീുൂൃെേൈൊോൌ്ൗൠൡൺൻർൽൾൿ','ଁଂଃଅଆଇଈଉଊଋଌଏଐଓଔକଖଗଘଙଚଛଜଝଞଟଠଡଢଣତଥଦଧନପଫବଭମଯରଲଳଵଶଷସହାିୀୁୂୃେୈୋୌ୍ୗୠୡ','أبتثجحخدذرزسشصضطظعغفقكلمنهوي','ਁਂਃਅਆਇਈਉਊਏਐਓਔਕਖਗਘਙਚਛਜਝਞਟਠਡਢਣਤਥਦਧਨਪਫਬਭਮਯਰਲਲ਼ਵਸ਼ਸਹਾਿੀੁੂੇੈੋੌ੍ੰੱ','ஃஅஆஇஈஉஊஎஏஐஒஓஔகஙசஜஞடணதநனபமயரறலளழவஷஸஹாிீுூெேைொோௌ்ௗ','ఁంఃఅఆఇఈఉఊఋఌఎఏఐఒఓఔకఖగఘఙచఛజఝఞటఠడఢణతథదధనపఫబభమయరఱలళవశషసహాిీుూృౄెేైొోౌ్ౕౖౠౡ'],f=function(lang){var shadow,computedHeight,bdy,r=false;if(this.supportedBrowserLangs.hasOwnProperty(lang)){r=this.supportedBrowserLangs[lang];}else if(supportedLangs.hasOwnProperty(lang)){bdy=window.document.getElementsByTagName('body')[0];shadow=createElem('div',window);shadow.id='Hyphenator_LanguageChecker';shadow.style.width='5em';shadow.style[prefix]='auto';shadow.style.hyphens='auto';shadow.style.fontSize='12px';shadow.style.lineHeight='12px';shadow.style.visibility='hidden';shadow.lang=lang;shadow.style['-webkit-locale']="'"+lang+"'";shadow.innerHTML=testStrings[supportedLangs[lang].script];bdy.appendChild(shadow);computedHeight=shadow.offsetHeight;bdy.removeChild(shadow);r=(computedHeight>12)?true:false;this.supportedBrowserLangs[lang]=r;}else{r=false;}return r;};return 
f;},r={support:false,supportedBrowserLangs:{},property:'',checkLangSupport:{}};if(window.getComputedStyle){s=window.getComputedStyle(window.document.getElementsByTagName('body')[0],null);}else{css3_h9n=r;return;}if(s.hyphens!==undefined){r.support=true;r.property='hyphens';r.checkLangSupport=createLangSupportChecker('hyphens');}else if(s['-webkit-hyphens']!==undefined){r.support=true;r.property='-webkit-hyphens';r.checkLangSupport=createLangSupportChecker('-webkit-hyphens');}else if(s.MozHyphens!==undefined){r.support=true;r.property='-moz-hyphens';r.checkLangSupport=createLangSupportChecker('MozHyphens');}else if(s['-ms-hyphens']!==undefined){r.support=true;r.property='-ms-hyphens';r.checkLangSupport=createLangSupportChecker('-ms-hyphens');}css3_h9n=r;},hyphenateClass='hyphenate',urlHyphenateClass='urlhyphenate',classPrefix='Hyphenator'+Math.round(Math.random()*1000),hideClass=classPrefix+'hide',hideClassRegExp=new RegExp("\\s?\\b"+hideClass+"\\b","g"),unhideClass=classPrefix+'unhide',unhideClassRegExp=new RegExp("\\s?\\b"+unhideClass+"\\b","g"),css3hyphenateClass=classPrefix+'css3hyphenate',css3hyphenateClassHandle,dontHyphenateClass='donthyphenate',min=6,orphanControl=1,isBookmarklet=(function(){var loc=null,re=false,scripts=contextWindow.document.getElementsByTagName('script'),i=0,l=scripts.length;while(!re&&i<l){loc=scripts[i].getAttribute('src');if(!!loc&&loc.indexOf('Hyphenator.js?bm=true')!==-1){re=true;}i+=1;}return re;}()),mainLanguage=null,defaultLanguage='',elements=(function(){var Element=function(element){this.element=element;this.hyphenated=false;this.treated=false;},ElementCollection=function(){this.count=0;this.hyCount=0;this.list={};};ElementCollection.prototype={add:function(el,lang){var elo=new Element(el);if(!this.list.hasOwnProperty(lang)){this.list[lang]=[];}this.list[lang].push(elo);this.count+=1;return elo;},remove:function(el){var lang,i,e,l;for(lang in 
this.list){if(this.list.hasOwnProperty(lang)){for(i=0;i<this.list[lang].length;i+=1){if(this.list[lang][i].element===el){e=i;l=lang;break;}}}}this.list[l].splice(e,1);this.count-=1;this.hyCount-=1;},each:function(fn){var k;for(k in this.list){if(this.list.hasOwnProperty(k)){if(fn.length===2){fn(k,this.list[k]);}else{fn(this.list[k]);}}}}};return new ElementCollection();}()),exceptions={},docLanguages={},url='(\\w*:\/\/)?((\\w*:)?(\\w*)@)?((([\\d]{1,3}\\.){3}([\\d]{1,3}))|((www\\.|[a-zA-Z]\\.)?[a-zA-Z0-9\\-\\.]+\\.([a-z]{2,4})))(:\\d*)?(\/[\\w#!:\\.?\\+=&%@!\\-]*)*',mail='[\\w-\\.]+@[\\w\\.]+',urlOrMailRE=new RegExp('('+url+')|('+mail+')','i'),zeroWidthSpace=(function(){var zws,ua=window.navigator.userAgent.toLowerCase();zws=String.fromCharCode(8203);if(ua.indexOf('msie 6')!==-1){zws='';}if(ua.indexOf('opera')!==-1&&ua.indexOf('version/10.00')!==-1){zws='';}return zws;}()),onBeforeWordHyphenation=function(word){return word;},onAfterWordHyphenation=function(word){return word;},onHyphenationDone=function(context){return context;},selectorFunction=false,mySelectorFunction=function(hyphenateClass){var tmp,el=[],i,l;if(window.document.getElementsByClassName){el=contextWindow.document.getElementsByClassName(hyphenateClass);}else if(window.document.querySelectorAll){el=contextWindow.document.querySelectorAll('.'+hyphenateClass);}else{tmp=contextWindow.document.getElementsByTagName('*');l=tmp.length;for(i=0;i<l;i+=1){if(tmp[i].className.indexOf(hyphenateClass)!==-1&&tmp[i].className.indexOf(dontHyphenateClass)===-1){el.push(tmp[i]);}}}return el;},selectElements=function(){var elems;if(selectorFunction){elems=selectorFunction();}else{elems=mySelectorFunction(hyphenateClass);}return elems;},intermediateState='hidden',unhide='wait',CSSEditors=[],CSSEdit=function(w){w=w||window;var doc=w.document,sheet=(function(){var 
i,l=doc.styleSheets.length,s,element,r=false;for(i=0;i<l;i+=1){s=doc.styleSheets[i];try{if(!!s.cssRules){r=s;break;}}catch(ignore){}}r=false;if(r===false){element=doc.createElement('style');element.type='text/css';doc.getElementsByTagName('head')[0].appendChild(element);r=doc.styleSheets[doc.styleSheets.length-1];}return r;}()),changes=[],findRule=function(sel){var s,rule,sheets=w.document.styleSheets,rules,i,j,r=false;for(i=0;i<sheets.length;i+=1){s=sheets[i];try{if(!!s.cssRules){rules=s.cssRules;}else if(!!s.rules){rules=s.rules;}}catch(ignore){}if(!!rules&&!!rules.length){for(j=0;j<rules.length;j+=1){rule=rules[j];if(rule.selectorText===sel){r={index:j,rule:rule};}}}}return r;},addRule=function(sel,rulesStr){var i,r;if(!!sheet.insertRule){if(!!sheet.cssRules){i=sheet.cssRules.length;}else{i=0;}r=sheet.insertRule(sel+'{'+rulesStr+'}',i);}else if(!!sheet.addRule){if(!!sheet.rules){i=sheet.rules.length;}else{i=0;}sheet.addRule(sel,rulesStr,i);r=i;}return r;},removeRule=function(sheet,index){if(sheet.deleteRule){sheet.deleteRule(index);}else{sheet.removeRule(index);}};return{setRule:function(sel,rulesString){var i,existingRule,cssText;existingRule=findRule(sel);if(!!existingRule){if(!!existingRule.rule.cssText){cssText=existingRule.rule.cssText;}else{cssText=existingRule.rule.style.cssText.toLowerCase();}if(cssText==='.'+hyphenateClass+' { visibility: hidden; }'){changes.push({sheet:existingRule.rule.parentStyleSheet,index:existingRule.index});}else if(cssText.indexOf('visibility: hidden')!==-1){i=addRule(sel,rulesString);changes.push({sheet:sheet,index:i});existingRule.rule.style.visibility='';}else{i=addRule(sel,rulesString);changes.push({sheet:sheet,index:i});}}else{i=addRule(sel,rulesString);changes.push({sheet:sheet,index:i});}},clearChanges:function(){var change=changes.pop();while(!!change){removeRule(change.sheet,change.index);change=changes.pop();}}};},hyphen=String.fromCharCode(173),urlhyphen=zeroWidthSpace,hyphenateURL=function(url){var 
tmp=url.replace(/([:\/\.\?#&\-_,;!@]+)/gi,'$&'+urlhyphen),parts=tmp.split(urlhyphen),i;for(i=0;i<parts.length;i+=1){if(parts[i].length>(2*min)){parts[i]=parts[i].replace(/(\w{3})(\w)/gi,"$1"+urlhyphen+"$2");}}if(parts[parts.length-1]===""){parts.pop();}return parts.join(urlhyphen);},safeCopy=true,hyphRunFor={},runWhenLoaded=function(w,f){var toplevel,add=window.document.addEventListener?'addEventListener':'attachEvent',rem=window.document.addEventListener?'removeEventListener':'detachEvent',pre=window.document.addEventListener?'':'on',init=function(context){if(hyphRunFor[context.location.href]){onWarning(new Error("Warning: multiple execution of Hyphenator.run() – This may slow down the script!"));}contextWindow=context||window;f();hyphRunFor[contextWindow.location.href]=true;},doScrollCheck=function(){try{w.document.documentElement.doScroll("left");}catch(error){window.setTimeout(doScrollCheck,1);return;}if(!hyphRunFor[w.location.href]){documentLoaded=true;init(w);}},doOnEvent=function(e){var i,fl,haveAccess;if(!!e&&e.type==='readystatechange'&&w.document.readyState!=='interactive'&&w.document.readyState!=='complete'){return;}w.document[rem](pre+'DOMContentLoaded',doOnEvent,false);w.document[rem](pre+'readystatechange',doOnEvent,false);fl=w.frames.length;if(fl===0||!doFrames){w[rem](pre+'load',doOnEvent,false);documentLoaded=true;init(w);}else 
if(doFrames&&fl>0){if(!!e&&e.type==='load'){w[rem](pre+'load',doOnEvent,false);for(i=0;i<fl;i+=1){haveAccess=undefined;try{haveAccess=window.frames[i].document.toString();}catch(err){haveAccess=undefined;}if(!!haveAccess){runWhenLoaded(w.frames[i],f);}}init(w);}}};if(documentLoaded||w.document.readyState==='complete'){documentLoaded=true;doOnEvent({type:'load'});}else{w.document[add](pre+'DOMContentLoaded',doOnEvent,false);w.document[add](pre+'readystatechange',doOnEvent,false);w[add](pre+'load',doOnEvent,false);toplevel=false;try{toplevel=!window.frameElement;}catch(ignore){}if(toplevel&&w.document.documentElement.doScroll){doScrollCheck();}}},getLang=function(el,fallback){try{return!!el.getAttribute('lang')?el.getAttribute('lang').toLowerCase():!!el.getAttribute('xml:lang')?el.getAttribute('xml:lang').toLowerCase():el.tagName.toLowerCase()!=='html'?getLang(el.parentNode,fallback):fallback?mainLanguage:null;}catch(ignore){}},autoSetMainLanguage=function(w){w=w||contextWindow;var el=w.document.getElementsByTagName('html')[0],m=w.document.getElementsByTagName('meta'),i,getLangFromUser=function(){var ml,text='',dH=300,dW=450,dX=Math.floor((w.outerWidth-dW)/2)+window.screenX,dY=Math.floor((w.outerHeight-dH)/2)+window.screenY,ul='',languageHint;if(!!window.showModalDialog&&(w.location.href.indexOf(basePath)!==-1)){ml=window.showModalDialog(basePath+'modalLangDialog.html',supportedLangs,"dialogWidth: "+dW+"px; dialogHeight: "+dH+"px; dialogtop: "+dY+"; dialogleft: "+dX+"; center: on; resizable: off; scroll: off;");}else{languageHint=(function(){var k,r='';for(k in supportedLangs){if(supportedLangs.hasOwnProperty(k)){r+=k+', ';}}r=r.substring(0,r.length-2);return r;}());ul=window.navigator.language||window.navigator.userLanguage;ul=ul.substring(0,2);if(!!supportedLangs[ul]&&supportedLangs[ul].prompt!==''){text=supportedLangs[ul].prompt;}else{text=supportedLangs.en.prompt;}text+=' (ISO 
639-1)\n\n'+languageHint;ml=window.prompt(window.unescape(text),ul).toLowerCase();}return ml;};mainLanguage=getLang(el,false);if(!mainLanguage){for(i=0;i<m.length;i+=1){if(!!m[i].getAttribute('http-equiv')&&(m[i].getAttribute('http-equiv').toLowerCase()==='content-language')){mainLanguage=m[i].getAttribute('content').toLowerCase();}if(!!m[i].getAttribute('name')&&(m[i].getAttribute('name').toLowerCase()==='dc.language')){mainLanguage=m[i].getAttribute('content').toLowerCase();}if(!!m[i].getAttribute('name')&&(m[i].getAttribute('name').toLowerCase()==='language')){mainLanguage=m[i].getAttribute('content').toLowerCase();}}}if(!mainLanguage&&doFrames&&(!!contextWindow.frameElement)){autoSetMainLanguage(window.parent);}if(!mainLanguage&&defaultLanguage!==''){mainLanguage=defaultLanguage;}if(!mainLanguage){mainLanguage=getLangFromUser();}el.lang=mainLanguage;},gatherDocumentInfos=function(){var elToProcess,urlhyphenEls,tmp,i=0,process=function(el,pLang,isChild){isChild=isChild||false;var n,j=0,hyphenate=true,eLang,useCSS3=function(){css3hyphenateClassHandle=new CSSEdit(contextWindow);css3hyphenateClassHandle.setRule('.'+css3hyphenateClass,css3_h9n.property+': auto;');css3hyphenateClassHandle.setRule('.'+dontHyphenateClass,css3_h9n.property+': manual;');if((eLang!==pLang)&&css3_h9n.property.indexOf('webkit')!==-1){css3hyphenateClassHandle.setRule('.'+css3hyphenateClass,'-webkit-locale : '+eLang+';');}el.className=el.className+' '+css3hyphenateClass;},useHyphenator=function(){if(isBookmarklet&&eLang!==mainLanguage){return;}if(supportedLangs.hasOwnProperty(eLang)){docLanguages[eLang]=true;}else{if(supportedLangs.hasOwnProperty(eLang.split('-')[0])){eLang=eLang.split('-')[0];docLanguages[eLang]=true;}else if(!isBookmarklet){hyphenate=false;onError(new Error('Language "'+eLang+'" is not yet supported.'));}}if(hyphenate){if(intermediateState==='hidden'){el.className=el.className+' 
'+hideClass;}elements.add(el,eLang);}};if(el.lang&&typeof(el.lang)==='string'){eLang=el.lang.toLowerCase();}else if(!!pLang&&pLang!==''){eLang=pLang.toLowerCase();}else{eLang=getLang(el,true);}if(!isChild){if(css3&&css3_h9n.support&&!!css3_h9n.checkLangSupport(eLang)){useCSS3();}else{useHyphenator();}}else{if(eLang!==pLang){if(css3&&css3_h9n.support&&!!css3_h9n.checkLangSupport(eLang)){useCSS3();}else{useHyphenator();}}else{if(!css3||!css3_h9n.support||!css3_h9n.checkLangSupport(eLang)){useHyphenator();}}}n=el.childNodes[j];while(!!n){if(n.nodeType===1&&!dontHyphenate[n.nodeName.toLowerCase()]&&n.className.indexOf(dontHyphenateClass)===-1&&n.className.indexOf(urlHyphenateClass)===-1&&!elToProcess[n]){process(n,eLang,true);}j+=1;n=el.childNodes[j];}},processUrlStyled=function(el){var n,j=0;n=el.childNodes[j];while(!!n){if(n.nodeType===1&&!dontHyphenate[n.nodeName.toLowerCase()]&&n.className.indexOf(dontHyphenateClass)===-1&&n.className.indexOf(hyphenateClass)===-1&&!urlhyphenEls[n]){processUrlStyled(n);}else if(n.nodeType===3){n.data=hyphenateURL(n.data);}j+=1;n=el.childNodes[j];}};if(css3){css3_gethsupport();}if(isBookmarklet){elToProcess=contextWindow.document.getElementsByTagName('body')[0];process(elToProcess,mainLanguage,false);}else{if(!css3&&intermediateState==='hidden'){CSSEditors.push(new CSSEdit(contextWindow));CSSEditors[CSSEditors.length-1].setRule('.'+hyphenateClass,'visibility: hidden;');CSSEditors[CSSEditors.length-1].setRule('.'+hideClass,'visibility: hidden;');CSSEditors[CSSEditors.length-1].setRule('.'+unhideClass,'visibility: visible;');}elToProcess=selectElements();tmp=elToProcess[i];while(!!tmp){process(tmp,'',false);i+=1;tmp=elToProcess[i];}urlhyphenEls=mySelectorFunction(urlHyphenateClass);i=0;tmp=urlhyphenEls[i];while(!!tmp){processUrlStyled(tmp);i+=1;tmp=urlhyphenEls[i];}}if(elements.count===0){for(i=0;i<CSSEditors.length;i+=1){CSSEditors[i].clearChanges();}onHyphenationDone(contextWindow.location.href);}},convertPatterns=function(lo){var 
size,tree={},convert=function(psize,patterns){var i=0,t=tree,cc=0,points=[],ppos=0,lastwasp=false,len=0;while(i<patterns.length){if(len<psize){cc=patterns.charCodeAt(i);if(cc>=49&&cc<=57){points[ppos]=cc-48;ppos+=1;lastwasp=true;}else{if(!t[cc]){t[cc]={};}t=t[cc];if(!lastwasp){points[ppos]=0;ppos+=1;}lastwasp=false;}len+=1;i+=1;}if(len===psize){if(!lastwasp){points[ppos]=0;}t.tpoints=points;t=tree;points=[];ppos=0;lastwasp=false;len=0;}}};for(size in lo.patterns){if(lo.patterns.hasOwnProperty(size)){convert(parseInt(size,10),lo.patterns[size]);}}lo.patterns=tree;},recreatePattern=function(pattern,nodePoints){var r=[],c=pattern.split(''),i;for(i=0;i<nodePoints.length;i+=1){if(nodePoints[i]!==0){r.push(nodePoints[i]);}if(c[i]){r.push(c[i]);}}return r.join('');},convertExceptionsToObject=function(exc){var w=exc.split(', '),r={},i,l,key;for(i=0,l=w.length;i<l;i+=1){key=w[i].replace(/-/g,'');if(!r.hasOwnProperty(key)){r[key]=w[i];}}return r;},loadPatterns=function(lang){var location,xhr,head,script;if(supportedLangs.hasOwnProperty(lang)&&!Hyphenator.languages[lang]){location=basePath+'patterns/'+supportedLangs[lang].file;}else{return;}if(isLocal&&!isBookmarklet){xhr=null;try{xhr=new window.XMLHttpRequest();}catch(e){try{xhr=new window.ActiveXObject("Microsoft.XMLHTTP");}catch(e2){try{xhr=new window.ActiveXObject("Msxml2.XMLHTTP");}catch(e3){xhr=null;}}}if(xhr){xhr.open('HEAD',location,true);xhr.setRequestHeader('Cache-Control','no-cache');xhr.onreadystatechange=function(){if(xhr.readyState===4){if(xhr.status===404){onError(new Error('Could not load\n'+location));delete docLanguages[lang];return;}}};xhr.send(null);}}if(createElem){head=window.document.getElementsByTagName('head').item(0);script=createElem('script',window);script.src=location;script.type='text/javascript';script.charset='utf8';head.appendChild(script);}},prepareLanguagesObj=function(lang){var 
lo=Hyphenator.languages[lang],wrd;if(!lo.prepared){if(enableCache){lo.cache={};}if(enableReducedPatternSet){lo.redPatSet={};}if(lo.hasOwnProperty('exceptions')){Hyphenator.addExceptions(lang,lo.exceptions);delete lo.exceptions;}if(exceptions.hasOwnProperty('global')){if(exceptions.hasOwnProperty(lang)){exceptions[lang]+=', '+exceptions.global;}else{exceptions[lang]=exceptions.global;}}if(exceptions.hasOwnProperty(lang)){lo.exceptions=convertExceptionsToObject(exceptions[lang]);delete exceptions[lang];}else{lo.exceptions={};}convertPatterns(lo);wrd='[\\w'+lo.specialChars+'@'+String.fromCharCode(173)+String.fromCharCode(8204)+'-]{'+min+',}';lo.genRegExp=new RegExp('('+url+')|('+mail+')|('+wrd+')','gi');lo.prepared=true;}if(!!storage){storage.setItem(lang,window.JSON.stringify(lo));}},prepare=function(callback){var lang,interval,tmp1,tmp2,languagesLoaded=function(){var finishedLoading=true,l;for(l in docLanguages){if(docLanguages.hasOwnProperty(l)){finishedLoading=false;if(!!Hyphenator.languages[l]){delete docLanguages[l];prepareLanguagesObj(l);callback(l);}}}return finishedLoading;};if(!enableRemoteLoading){for(lang in Hyphenator.languages){if(Hyphenator.languages.hasOwnProperty(lang)){prepareLanguagesObj(lang);}}callback('*');return;}for(lang in docLanguages){if(docLanguages.hasOwnProperty(lang)){if(!!storage&&storage.test(lang)){Hyphenator.languages[lang]=window.JSON.parse(storage.getItem(lang));if(exceptions.hasOwnProperty('global')){tmp1=convertExceptionsToObject(exceptions.global);for(tmp2 in tmp1){if(tmp1.hasOwnProperty(tmp2)){Hyphenator.languages[lang].exceptions[tmp2]=tmp1[tmp2];}}}if(exceptions.hasOwnProperty(lang)){tmp1=convertExceptionsToObject(exceptions[lang]);for(tmp2 in tmp1){if(tmp1.hasOwnProperty(tmp2)){Hyphenator.languages[lang].exceptions[tmp2]=tmp1[tmp2];}}delete exceptions[lang];}tmp1='[\\w'+Hyphenator.languages[lang].specialChars+'@'+String.fromCharCode(173)+String.fromCharCode(8204)+'-]{'+min+',}';Hyphenator.languages[lang].genRegExp=new 
RegExp('('+url+')|('+mail+')|('+tmp1+')','gi');delete docLanguages[lang];callback(lang);}else{loadPatterns(lang);}}}if(!languagesLoaded()){interval=window.setInterval(function(){var loadingDone=languagesLoaded();if(loadingDone){window.clearInterval(interval);}},100);}},toggleBox=function(){var bdy,myTextNode,text=(Hyphenator.doHyphenation?'Hy-phen-a-tion':'Hyphenation'),myBox=contextWindow.document.getElementById('HyphenatorToggleBox');if(!!myBox){myBox.firstChild.data=text;}else{bdy=contextWindow.document.getElementsByTagName('body')[0];myBox=createElem('div',contextWindow);myBox.setAttribute('id','HyphenatorToggleBox');myBox.setAttribute('class',dontHyphenateClass);myTextNode=contextWindow.document.createTextNode(text);myBox.appendChild(myTextNode);myBox.onclick=Hyphenator.toggleHyphenation;myBox.style.position='absolute';myBox.style.top='0px';myBox.style.right='0px';myBox.style.margin='0';myBox.style.backgroundColor='#AAAAAA';myBox.style.color='#FFFFFF';myBox.style.font='6pt Arial';myBox.style.letterSpacing='0.2em';myBox.style.padding='3px';myBox.style.cursor='pointer';myBox.style.WebkitBorderBottomLeftRadius='4px';myBox.style.MozBorderRadiusBottomleft='4px';myBox.style.borderBottomLeftRadius='4px';bdy.appendChild(myBox);}},hyphenateWord=function(lo,lang,word){var parts,i,pattern,ww,wwlen,wwhp=[],pstart,plen,trie=lo.patterns,node,nodePoints,hp,wordLength=word.length,hw='',doCharSubst=function(w){var subst,r;for(subst in lo.charSubstitution){if(lo.charSubstitution.hasOwnProperty(subst)){r=w.replace(new RegExp(subst,'g'),lo.charSubstitution[subst]);}}return r;};word=onBeforeWordHyphenation(word,lang);if(word===''){hw='';}else if(enableCache&&lo.cache.hasOwnProperty(word)){hw=lo.cache[word];}else if(word.indexOf(hyphen)!==-1){hw=word;}else if(lo.exceptions.hasOwnProperty(word)){hw=lo.exceptions[word].replace(/-/g,hyphen);}else 
if(word.indexOf('-')!==-1){parts=word.split('-');for(i=0;i<parts.length;i+=1){parts[i]=hyphenateWord(lo,lang,parts[i]);}hw=parts.join('-');}else{ww=word.toLowerCase();if(!!lo.charSubstitution){ww=doCharSubst(ww);}if(word.indexOf("'")!==-1){ww=ww.replace("'","’");}ww='_'+ww+'_';wwlen=ww.length;for(hp=0;hp<wwlen+1;hp+=1){wwhp[hp]=0;}for(pstart=0;pstart<wwlen;pstart+=1){node=trie;pattern='';for(plen=pstart;plen<wwlen;plen+=1){node=node[ww.charCodeAt(plen)];if(node){if(enableReducedPatternSet){pattern+=ww.charAt(plen);}nodePoints=node.tpoints;if(nodePoints){if(enableReducedPatternSet){if(!lo.redPatSet){lo.redPatSet={};}lo.redPatSet[pattern]=recreatePattern(pattern,nodePoints);}for(hp=0;hp<nodePoints.length;hp+=1){wwhp[pstart+hp]=Math.max(wwhp[pstart+hp],nodePoints[hp]);}}}else{break;}}}for(hp=0;hp<wordLength;hp+=1){if(hp>=lo.leftmin&&hp<=(wordLength-lo.rightmin)&&(wwhp[hp+1]%2)!==0){hw+=hyphen+word.charAt(hp);}else{hw+=word.charAt(hp);}}}hw=onAfterWordHyphenation(hw,lang);if(enableCache){lo.cache[word]=hw;}return hw;},removeHyphenationFromElement=function(el){var h,u,i=0,n;switch(hyphen){case'|':h='\\|';break;case'+':h='\\+';break;case'*':h='\\*';break;default:h=hyphen;}switch(urlhyphen){case'|':u='\\|';break;case'+':u='\\+';break;case'*':u='\\*';break;default:u=urlhyphen;}n=el.childNodes[i];while(!!n){if(n.nodeType===3){n.data=n.data.replace(new RegExp(h,'g'),'');n.data=n.data.replace(new RegExp(u,'g'),'');}else if(n.nodeType===1){removeHyphenationFromElement(n);}i+=1;n=el.childNodes[i];}},copy=(function(){var Copy=function(){this.oncopyHandler=function(e){e=e||window.event;var 
shadow,selection,range,rangeShadow,restore,target=e.target||e.srcElement,currDoc=target.ownerDocument,bdy=currDoc.getElementsByTagName('body')[0],targetWindow=currDoc.defaultView||currDoc.parentWindow;if(target.tagName&&dontHyphenate[target.tagName.toLowerCase()]){return;}shadow=currDoc.createElement('div');shadow.style.color=window.getComputedStyle?targetWindow.getComputedStyle(bdy,null).backgroundColor:'#FFFFFF';shadow.style.fontSize='0px';bdy.appendChild(shadow);if(!!window.getSelection){e.stopPropagation();selection=targetWindow.getSelection();range=selection.getRangeAt(0);shadow.appendChild(range.cloneContents());removeHyphenationFromElement(shadow);selection.selectAllChildren(shadow);restore=function(){shadow.parentNode.removeChild(shadow);selection.removeAllRanges();selection.addRange(range);};}else{e.cancelBubble=true;selection=targetWindow.document.selection;range=selection.createRange();shadow.innerHTML=range.htmlText;removeHyphenationFromElement(shadow);rangeShadow=bdy.createTextRange();rangeShadow.moveToElementText(shadow);rangeShadow.select();restore=function(){shadow.parentNode.removeChild(shadow);if(range.text!==""){range.select();}};}window.setTimeout(restore,0);};this.removeOnCopy=function(el){var body=el.ownerDocument.getElementsByTagName('body')[0];if(!body){return;}el=el||body;if(window.removeEventListener){el.removeEventListener("copy",this.oncopyHandler,true);}else{el.detachEvent("oncopy",this.oncopyHandler);}};this.registerOnCopy=function(el){var body=el.ownerDocument.getElementsByTagName('body')[0];if(!body){return;}el=el||body;if(window.addEventListener){el.addEventListener("copy",this.oncopyHandler,true);}else{el.attachEvent("oncopy",this.oncopyHandler);}};};return(safeCopy?new Copy():false);}()),checkIfAllDone=function(){var allDone=true,i,doclist={},doc;elements.each(function(ellist){var 
j,l=ellist.length;for(j=0;j<l;j+=1){allDone=allDone&&ellist[j].hyphenated;if(!doclist.hasOwnProperty(ellist[j].element.baseURI)){doclist[ellist[j].element.ownerDocument.location.href]=true;}doclist[ellist[j].element.ownerDocument.location.href]=doclist[ellist[j].element.ownerDocument.location.href]&&ellist[j].hyphenated;}});if(allDone){if(intermediateState==='hidden'&&unhide==='progressive'){elements.each(function(ellist){var j,l=ellist.length,el;for(j=0;j<l;j+=1){el=ellist[j].element;el.className=el.className.replace(unhideClassRegExp,'');if(el.className===''){el.removeAttribute('class');}}});}for(i=0;i<CSSEditors.length;i+=1){CSSEditors[i].clearChanges();}for(doc in doclist){if(doclist.hasOwnProperty(doc)){onHyphenationDone(doc);}}}},hyphenateElement=function(lang,elo){var el=elo.element,hyphenate,n,i,lo,controlOrphans=function(part){var h,r;switch(hyphen){case'|':h='\\|';break;case'+':h='\\+';break;case'*':h='\\*';break;default:h=hyphen;}part=part.replace(/[\s]*$/,'');if(orphanControl>=2){r=part.split(' ');r[1]=r[1].replace(new RegExp(h,'g'),'');r[1]=r[1].replace(new RegExp(zeroWidthSpace,'g'),'');r=r.join(' ');}if(orphanControl===3){r=r.replace(/[ ]+/g,String.fromCharCode(160));}return r;};if(Hyphenator.languages.hasOwnProperty(lang)){lo=Hyphenator.languages[lang];hyphenate=function(word){var r;if(!Hyphenator.doHyphenation){r=word;}else if(urlOrMailRE.test(word)){r=hyphenateURL(word);}else{r=hyphenateWord(lo,lang,word);}return r;};if(safeCopy&&(el.tagName.toLowerCase()!=='body')){copy.registerOnCopy(el);}i=0;n=el.childNodes[i];while(!!n){if(n.nodeType===3&&n.data.length>=min){n.data=n.data.replace(lo.genRegExp,hyphenate);if(orphanControl!==1){n.data=n.data.replace(/[\S]+ 
[\S]+[\s]*$/,controlOrphans);}}i+=1;n=el.childNodes[i];}}if(intermediateState==='hidden'&&unhide==='wait'){el.className=el.className.replace(hideClassRegExp,'');if(el.className===''){el.removeAttribute('class');}}if(intermediateState==='hidden'&&unhide==='progressive'){el.className=el.className.replace(hideClassRegExp,' '+unhideClass);}elo.hyphenated=true;elements.hyCount+=1;if(elements.count<=elements.hyCount){checkIfAllDone();}},hyphenateLanguageElements=function(lang){function bind(fun,arg1,arg2){return function(){return fun(arg1,arg2);};}var i,l;if(lang==='*'){elements.each(function(lang,ellist){var j,le=ellist.length;for(j=0;j<le;j+=1){window.setTimeout(bind(hyphenateElement,lang,ellist[j]),0);}});}else{if(elements.list.hasOwnProperty(lang)){l=elements.list[lang].length;for(i=0;i<l;i+=1){window.setTimeout(bind(hyphenateElement,lang,elements.list[lang][i]),0);}}}},removeHyphenationFromDocument=function(){elements.each(function(ellist){var i,l=ellist.length;for(i=0;i<l;i+=1){removeHyphenationFromElement(ellist[i].element);if(safeCopy){copy.removeOnCopy(ellist[i].element);}ellist[i].hyphenated=false;}});},createStorage=function(){var s;try{if(storageType!=='none'&&window.localStorage!==undefined&&window.sessionStorage!==undefined&&window.JSON.stringify!==undefined&&window.JSON.parse!==undefined){switch(storageType){case'session':s=window.sessionStorage;break;case'local':s=window.localStorage;break;default:s=undefined;break;}s.setItem('storageTest','1');s.removeItem('storageTest');}}catch(e){s=undefined;}if(s){storage={prefix:'Hyphenator_'+Hyphenator.version+'_',store:s,test:function(name){var val=this.store.getItem(this.prefix+name);return(!!val)?true:false;},getItem:function(name){return this.store.getItem(this.prefix+name);},setItem:function(name,value){try{this.store.setItem(this.prefix+name,value);}catch(e){onError(e);}}};}else{storage=undefined;}},storeConfiguration=function(){if(!storage){return;}var 
settings={'STORED':true,'classname':hyphenateClass,'urlclassname':urlHyphenateClass,'donthyphenateclassname':dontHyphenateClass,'minwordlength':min,'hyphenchar':hyphen,'urlhyphenchar':urlhyphen,'togglebox':toggleBox,'displaytogglebox':displayToggleBox,'remoteloading':enableRemoteLoading,'enablecache':enableCache,'enablereducedpatternset':enableReducedPatternSet,'onhyphenationdonecallback':onHyphenationDone,'onerrorhandler':onError,'onwarninghandler':onWarning,'intermediatestate':intermediateState,'selectorfunction':selectorFunction||mySelectorFunction,'safecopy':safeCopy,'doframes':doFrames,'storagetype':storageType,'orphancontrol':orphanControl,'dohyphenation':Hyphenator.doHyphenation,'persistentconfig':persistentConfig,'defaultlanguage':defaultLanguage,'useCSS3hyphenation':css3,'unhide':unhide,'onbeforewordhyphenation':onBeforeWordHyphenation,'onafterwordhyphenation':onAfterWordHyphenation};storage.setItem('config',window.JSON.stringify(settings));},restoreConfiguration=function(){var settings;if(storage.test('config')){settings=window.JSON.parse(storage.getItem('config'));Hyphenator.config(settings);}};return{version:'4.3.0',doHyphenation:true,languages:{},config:function(obj){var assert=function(name,type){var r,t;t=typeof obj[name];if(t===type){r=true;}else{onError(new Error('Config onError: '+name+' must be of type '+type));r=false;}return r;},key;if(obj.hasOwnProperty('storagetype')){if(assert('storagetype','string')){storageType=obj.storagetype;}if(!storage){createStorage();}}if(!obj.hasOwnProperty('STORED')&&storage&&obj.hasOwnProperty('persistentconfig')&&obj.persistentconfig===true){restoreConfiguration();}for(key in 
obj){if(obj.hasOwnProperty(key)){switch(key){case'STORED':break;case'classname':if(assert('classname','string')){hyphenateClass=obj[key];}break;case'urlclassname':if(assert('urlclassname','string')){urlHyphenateClass=obj[key];}break;case'donthyphenateclassname':if(assert('donthyphenateclassname','string')){dontHyphenateClass=obj[key];}break;case'minwordlength':if(assert('minwordlength','number')){min=obj[key];}break;case'hyphenchar':if(assert('hyphenchar','string')){if(obj.hyphenchar==='­'){obj.hyphenchar=String.fromCharCode(173);}hyphen=obj[key];}break;case'urlhyphenchar':if(obj.hasOwnProperty('urlhyphenchar')){if(assert('urlhyphenchar','string')){urlhyphen=obj[key];}}break;case'togglebox':if(assert('togglebox','function')){toggleBox=obj[key];}break;case'displaytogglebox':if(assert('displaytogglebox','boolean')){displayToggleBox=obj[key];}break;case'remoteloading':if(assert('remoteloading','boolean')){enableRemoteLoading=obj[key];}break;case'enablecache':if(assert('enablecache','boolean')){enableCache=obj[key];}break;case'enablereducedpatternset':if(assert('enablereducedpatternset','boolean')){enableReducedPatternSet=obj[key];}break;case'onhyphenationdonecallback':if(assert('onhyphenationdonecallback','function')){onHyphenationDone=obj[key];}break;case'onerrorhandler':if(assert('onerrorhandler','function')){onError=obj[key];}break;case'onwarninghandler':if(assert('onwarninghandler','function')){onWarning=obj[key];}break;case'intermediatestate':if(assert('intermediatestate','string')){intermediateState=obj[key];}break;case'selectorfunction':if(assert('selectorfunction','function')){selectorFunction=obj[key];}break;case'safecopy':if(assert('safecopy','boolean')){safeCopy=obj[key];}break;case'doframes':if(assert('doframes','boolean')){doFrames=obj[key];}break;case'storagetype':if(assert('storagetype','string')){storageType=obj[key];}break;case'orphancontrol':if(assert('orphancontrol','number')){orphanControl=obj[key];}break;case'dohyphenation':if(assert('dohyphenation
','boolean')){Hyphenator.doHyphenation=obj[key];}break;case'persistentconfig':if(assert('persistentconfig','boolean')){persistentConfig=obj[key];}break;case'defaultlanguage':if(assert('defaultlanguage','string')){defaultLanguage=obj[key];}break;case'useCSS3hyphenation':if(assert('useCSS3hyphenation','boolean')){css3=obj[key];}break;case'unhide':if(assert('unhide','string')){unhide=obj[key];}break;case'onbeforewordhyphenation':if(assert('onbeforewordhyphenation','function')){onBeforeWordHyphenation=obj[key];}break;case'onafterwordhyphenation':if(assert('onafterwordhyphenation','function')){onAfterWordHyphenation=obj[key];}break;default:onError(new Error('Hyphenator.config: property '+key+' not known.'));}}}if(storage&&persistentConfig){storeConfiguration();}},run:function(){var process=function(){try{if(contextWindow.document.getElementsByTagName('frameset').length>0){return;}autoSetMainLanguage(undefined);gatherDocumentInfos();prepare(hyphenateLanguageElements);if(displayToggleBox){toggleBox();}}catch(e){onError(e);}};if(!storage){createStorage();}runWhenLoaded(window,process);},addExceptions:function(lang,words){if(lang===''){lang='global';}if(exceptions.hasOwnProperty(lang)){exceptions[lang]+=", "+words;}else{exceptions[lang]=words;}},hyphenate:function(target,lang){var hyphenate,n,i,lo;lo=Hyphenator.languages[lang];if(Hyphenator.languages.hasOwnProperty(lang)){if(!lo.prepared){prepareLanguagesObj(lang);}hyphenate=function(word){var r;if(urlOrMailRE.test(word)){r=hyphenateURL(word);}else{r=hyphenateWord(lo,lang,word);}return r;};if(typeof target==='object'&&!(typeof target==='string'||target.constructor===String)){i=0;n=target.childNodes[i];while(!!n){if(n.nodeType===3&&n.data.length>=min){n.data=n.data.replace(lo.genRegExp,hyphenate);}else if(n.nodeType===1){if(n.lang!==''){Hyphenator.hyphenate(n,n.lang);}else{Hyphenator.hyphenate(n,lang);}}i+=1;n=target.childNodes[i];}}else if(typeof target==='string'||target.constructor===String){return 
target.replace(lo.genRegExp,hyphenate);}}else{onError(new Error('Language "'+lang+'" is not loaded.'));}},getRedPatternSet:function(lang){return Hyphenator.languages[lang].redPatSet;},isBookmarklet:function(){return isBookmarklet;},getConfigFromURI:function(){var loc=null,re={},jsArray=contextWindow.document.getElementsByTagName('script'),i,j,l,s,gp,option;for(i=0,l=jsArray.length;i<l;i+=1){if(!!jsArray[i].getAttribute('src')){loc=jsArray[i].getAttribute('src');}if(loc&&(loc.indexOf('Hyphenator.js?')!==-1)){s=loc.indexOf('Hyphenator.js?');gp=loc.substring(s+14).split('&');for(j=0;j<gp.length;j+=1){option=gp[j].split('=');if(option[0]!=='bm'){if(option[1]==='true'){option[1]=true;}else if(option[1]==='false'){option[1]=false;}else if(isFinite(option[1])){option[1]=parseInt(option[1],10);}if(option[0]==='togglebox'||option[0]==='onhyphenationdonecallback'||option[0]==='onerrorhandler'||option[0]==='selectorfunction'||option[0]==='onbeforewordhyphenation'||option[0]==='onafterwordhyphenation'){option[1]=new Function('',option[1]);}re[option[0]]=option[1];}}break;}}return re;},toggleHyphenation:function(){if(Hyphenator.doHyphenation){if(!!css3hyphenateClassHandle){css3hyphenateClassHandle.setRule('.'+css3hyphenateClass,css3_h9n.property+': none;');}removeHyphenationFromDocument();Hyphenator.doHyphenation=false;storeConfiguration();toggleBox();}else{if(!!css3hyphenateClassHandle){css3hyphenateClassHandle.setRule('.'+css3hyphenateClass,css3_h9n.property+': 
auto;');}hyphenateLanguageElements('*');Hyphenator.doHyphenation=true;storeConfiguration();toggleBox();}}};}(window));if(Hyphenator.isBookmarklet()){Hyphenator.config({displaytogglebox:true,intermediatestate:'visible',storagetype:'local',doframes:true,useCSS3hyphenation:true});Hyphenator.config(Hyphenator.getConfigFromURI());Hyphenator.run();}Hyphenator.languages['en-gb']={leftmin:2,rightmin:3,specialChars:"",patterns:{3:"sw2s2ym1p2chck1cl2cn2st24sss1rzz21moc1qcr2m5q2ct2byb1vcz2z5sd3bs1jbr4m3rs2hd2gbo2t3gd1jb1j1dosc2d1pdr2dt4m1v1dum3w2myd1vea2r2zr1we1bb2e2edn1az1irt2e1fe1j4aya4xr1q2av2tlzd4r2kr1jer1m1frh2r1fr2er1bqu44qft3ptr22ffy3wyv4y3ufl21fo1po2pn2ft3fut1wg1ba2ra4q2gh4ucm2ep5gp1fm5d2ap2aom1cg3p2gyuf2ha2h1bh1ch1d4nda2nhe22oz2oyo4xh1fh5h4hl2ot2hrun1h1wh2y2yp2aki2d2upie22ah2oo2igu4r2ii2omo1j2oiyn1lz42ip2iq2ir1aba4a2ocn3fuu4uv22ix1iz1jay1iy1h2lylx4l3wn5w2ji4jr4ng4jsy1gk1ck1fkk4y5fk1mkn21vok1pvr44vsk1t4vyk5vk1wl2aw5cn2ul3bw5fwh2wi2w1m1wowt4wy2wz4x1an1in1rn1ql3hxe4x1hx1ill24lsn3mlm2n1jx1ox3plr4x5wxx4",4:"d3gr_fi2xy3ty1a2x5usy5acx1urxu4on2ielph2xti4ni2gx4thn2ilx1t2x1s25niql3rix4osxo4n1logn2ivx5om1locl3ro2lo_l3nel1n4_hi2l5rul1mexi4pl1max3io_ex1l1lu_ig3ll5tll3sll3p_in14n2kl1loll3mn3le_ew4n1n4nne4l1lixi4cll3fn3nil1lal5skls4p_eu14no_l4ivx3erx3enl1itx1eml1isx5eg3lirli1qxe2d3lik5lihx1ec1lig4y1bn1oun4ow4li_x3c4yb2il1g2l2fox2as1leyn3p42lev1letx2ag4ni_l1te_es1nhy2yc1l4n1sw3tow5tenho4ns2cwra42lerle5qn2si3womwol4l1try1d4lek42ledwl1in3suw3la4le_l3don1teldi2nth2lce4yda4l1c2l1tu4lu_l4by_od4lbe4lu1a4laz_oi4l4awnt2iwes4l4aul4asn2tjla4p_or1n1tr5wein1tun2tyn1h2w4ednu1awe4b5nuc_os13nudl4all4af_ov4w3drl4aey3eenu3iw1b45nukl4ac5laa4la_4lue3kyllu1in1gu4wabn1go_ph2v5vikur5_en12vv2ks4ty3enk3slv5rov5ri4k1sk3rung1n2vowy1erkol4ko5a4vonk2novo2l2vo_5lupn2gingh4k3lok3lik3lak2l2ng2aki4wvi2tkis4k1inki2l5kihk3holu1vke4g3kee4kedkdo4_sa2k5d2_eg4k1b4kav4kap4vim4ka3ovi4lk4ann3v2nve2vic2ka4lju1v4vi_ju5ljui4_sh2ygi2nfo4_st44jo_3jo2jil43jigl4vi2vel3veive3gjew3jeu42ve_4jesjeo2y3gljal43jac2ja__th44ly_2izz_ti22izo
_do2i5yeix3oy3in2i1wn2x4i2vov4ad2ny25nyc5vacn1z24va_nzy4uy4aux2o2oa2o3ag2ivauve2u4vayle2i3um2ittly1c4obau3tu2itrob2i4obo_up12ithob5tuts2lym2ut2o_ve2oc2ait1a2isyo1clo1crut2ioct2is1pis1lo1cy4usto2doo2du4isblyp2n4ew2ab_2abai4saoe3a2abbus1pir2sir4qoe4do5eeir1ioep5o5eqo3er2usco1etir1a3lyr3lywipy43oeuo3evi3poab1ro3ex4ofo2o1gur1uo2ga2abyac2a3lyzi5oxo3gii3oti1orioe4ur2so2gui1od2io22acio1h2ur1o2inuo3hao3heohy44ma_oi4cins24inqoig4ac1r2ino2inn4inl4inkur1ioi4our2f4oisoi4t2iniynd4ok3lok5u2ind2inco1loyn2eo1mai2moom1iur2ca2doim1iil3v4iluon1co2nead1ril3f4onh2ik24iju4adyae5aija4i5in4aed2mahae5gihy4ae5pur1aae4s2i1h4igions2i1geyng42ont4af_4afe5maka4fui3fyu2pri3foon2zn1eru4po4agli2fe2i1foo1iu1ph4ieua2groo4moo2pyn4yi1er4iemie5ia1heah4n4iec2ai24ai_ai3aa1icne2p4idraig2oo2tu1peo1paop1iy1o2u1ouu3os4oplid1ayo3d2icuop1uor1a2ick4ich2a1ja4ju2mam4iceak5u4ibuunu44iboib1i2oreiav4i3aui3atun5ror1iun5o2alei5aii3ah2unniaf4i5ae2ormhy4thyr4hy3ohyn4hy2m2orthy2l1man2nedhuz4un2ihu4gh1th4alko1sch4skhsi42mapu1mu2h1shry4hri4hre41mar4h1pum2ph2ou4osp4osuy2ph4oth4ho_u1mi2h1mh1leh3la2ne_h4irhi2pu1mao4u2oub2h1in2a2mhi4l4oueu1lu2ulsoug4h1ic2hi_u1loul3mnde24ulln2daheu2ul2iou3mam1ihet12ounhep1ow1iows4ow5yyp1nox3ih4eiox5oypo1oy5aoys4u1la4ul_am2pu2izmav4h2ea4he_y2prhdu42m1ban2ao1zo_ch4mb4dy5pu4pa_ha4m1paru2ic5pau2ui2h4ac4ha_u4gon1cug5z2uft43gynu4fou3fl3ufa5gymmb2iue4tgy2b4anhnc1t2g1w5paw3gun2p1bu4edueb4p1c42guep5d2an1og5to2pe_gs4tgs4c2g1san2s2ped3grug4rou2dog4reud4g1gr2n1crgov12gou3gosud4e3goop4ee3goe5god3goc5goa2go_pe2fg2nog1niuc3lg1na2gn2an2y2pes3gluyr4r3pet5aowyr4s4ap_4apa3glo4pexyr5uu4ch2gl24y2s5gip2me_3gioap1i2ph_gi4g3gib4gi_uba41g2igh2tg3hoa2prphe44aps2medg2gegg4ame2g2g1gy3shu1alua5hu2ag2g1f3get2ua2ph2lge4o1pho2tz23gen4phs1gel1typ4gef2ge_g5d4me2m1phug1at4pi_p2iety4a4ty_p2ilt3wopim23gait2wi3gagn3b44ga_5piqar3har1i1tutfu4c4fu_1menp2l23tunna2vfs4p2f3s1pla1fr2tu1ifo3v4tufp4ly2p1myso53foo2arrme4par2stu1afo2n4tu_4po_t2tytt5s3pod2aru4poffo2e3foc4fo_ar5zas1ays1t3flu2asc3flo3flan2asas2et3ti2fin5poypph44f5hf3fr1pr2f1fif1fe
na5o3feufe4t4pry2ps22asotta4p3sh5fei3fecass2p1sits2its4ht2sc2fe_4t1s2f5d4f5b5faw5farp1st2pt2as1u2fa_1f2aeyl44ey_1expe1wre3whe1waevu4p4trp1tupub1puc4p4uneus44eumeuk5eue4p4uset5zyzy4z1a14p1wet2t2p4y4tovpy3e3pyg3pylpy5t2za__av44ra_r2adras2et2ae1su1namr2bat1orr2berb2ir1c2r2clrct4nak24re_rea4e2sc4es_2erza2to5tok2erurei4erk44erj1tog3toere1qre1vza2irf4lr1g2r2gez4as4ri_2ereto1b2erd2to_2erc4m3hri3ori5reph14mi_2au24au_m1ic4auc4t3me1paeo3mt1lieo2leof2eo3b4enur1lar1leaun2r1loen2sen1ot1laen3kzeb4r1mur2n24ene2end3tiurn5nrnt4ze4d4ro_r2od4roiroo4r2opelv4e1lur4owti4q1tip4roxrpe2r2ph1tior3puaw1i5nahaw5y4mijr3ri_as12eleay3mayn4ays2r5rurry5ek4l2az2m2ilaze4e2ize2iv4eis2ba_t1ineig24eifeid45bahba4ir2seehy21timeh5se5hoe1h2e2gr2efuef4lna2ceep1ee2mee1iee5gee2fr3su2na_rt3ced4g1basede23mytr1turu3ar2udr4ufe1clru2le1ceru2pb1c2ec2a2b1deb2te2bre4bl3myi4be_3beaeb2iebe4eb2b2bedzib5r1v2r2veeau3t1icmy3e5bee3bef2r2yry2tz2ie1bel2sa_2sabeap25saebe3meak1ea4gsa4g3sai4ti_5sak4beobe3q4eabmy4dd3zo3dyndyl25dyksa2l2d2y2d1wsa4mbe3w2b1fbfa44b1hb4ha2bi_1biazi5mdu3udu2ps3apb4ie3ducbif42ths2du_z4isb1ilmi3od4swds3m4bimd5sl1saumi3pz3li3dox4s3bd4osd2or3doosby3bip4bi5qbir44zo_s1cab2iss1cedo4jd4ob4do_5zoa2d1mmtu4d5lu2bl2d1losch2d1la2dl4tha42th_m5si4m1ss2co2t3f1diu2se_se2a4bly2b1m3texbmi44b1nm4ry4bo_3boa2sed5bobdil4bo5h3sei1didse2p1dia4di_d4hu3bon4d1hxys4dg4ami2t2d5f1boo3dexs2es1set3sev3sex3sey2s1fsfi4_an1d3eqde1ps4idsif4bow2si4g2sin5boyzo5p3sipde3gs1it3dec2de_d3di2tep3miute2od1d4d3c4zot23davs2k24sk_d1atske2d3ap4sksd1agb3sc2sl44da_5zumb5sicy4tbso2te2ltei4cys4cy4m2b1tcyl34bu_5bubte2g1cyc2cy_bun2cu5v5cuu1cuss2le1curt4edc4ufc1tyc1tu4te_c1trs1n2s2na2so_t1ca5mix4b3w4zy_4by_3byibys45byt2ca_2tc23soes2olc1te5cafsos45cai5cakc1al3sou4t3bt4axc2ta4m1lcry2sph2s1plc2res2pos4pym3pum3pocoz4cov14mo_sre22moc5cao1caps1sa3cooss3mcon11cars4sns1sos1su1takss3wmod13coe4st_1tai3tah3coc3coa4co_taf4c3nim2pist3cc1atste2mo1mc4kem4ons1th2cim3cau2tab2ta_3cayc1c44stl3cilc3ch3syn4cigci3f4ce_4ci_3chrs1tu1cho2ced4chm1sylch5k4stw4cefce5gs4tysy4d4su_sug3sy1c
3sui4ch_m3pa2cem4sy_cew4ce2t1cepsu5zm4op2swo2s3vzzo3",5:"n5tau2cenn3centsves45swee5cencsu5sus4urg1cen2sur3csu5pe3cerasun4a3cerdsum3i5cern5cesss4u2m1s2ulce4mo3cemi4celysy4bi4chab3chae3chaisui5ccelo45cellchec44ched3chee3chemsuf3fch1ersu3etsud4asuct44chessubt2ch5eusu4b13chewch5ex5chi_3chiasu5ansy4ce1styl3ceiv3chio5chip3cedi3cedestu4m5cedace4cicho3a5choc4chois4tud3chor3ceas2st3sstre43chots2tou3stonchow5cean3chur43chut5chyd3chyl3chym1c2i24ceab4ciaccia4mci3ca4cids4cie_ci3ers4toeci5etccle3cifi4ccip4ci3gast3lisyn5esyr5icat4ucim3aci3mes5tizs4thu4cinds4thac4atss4tec4cintci3olci5omci4pocisi4cit3rt2abockar5cka5tt5adeck5ifck4scc2atcs4teb3clasc2le22cle_c5lecc4at_clev3cli1mtad4icli2qclo4q4stakclue4clyp55clystad2rtae5n1c2o2case5car4vco5ba3tagrco3cico5custab23tail4cody2tairco5etco3grcar5mt4ais4col_col3atal2css5poco5lyta3lyco4met4anecomp4cap3uta4pass5liss1ins1sifs1siccon3scon3ts3siacapt4coop4co3orcop4eco3phco5plco3pocop4t2corassev3s5seus1sel1tard3corn4corotar3n5cort3cos_sre4ssreg5co5ta3tarr5cotytas3it3asmco3vacow5a5tassco5zic4anotas4t5craftat4rc4ran5spomcam4is4plysple2ca3maca3lys2pins2pids3phacal4m4speocri3lcron4so3vi4crousov5et5awacrym3cryo34c5s4csim5tawn43calcc3tacc4alaso5thct1an4soseca3gos3orycad4rc4teasor3os2o2ps4onect5esct5etct2ics2onaso3mo1so2mc3timsol3acaco3c4acesody4sod3oc5tio2s3odc3tittcas4tch5u4t1d4smo4dsmi3gc1tomc3tons3mensmas4b3utec2tres3man3bustc2tumte3cr2s1m4buss2s5lucslov5c2ulislo3cs3lits5leycu4mi5cunacun4e5cuni5cuolcu5pacu3pic3upl4tedds3lets5leabur3ebunt4cus5a3slauc3utr4tedobun4a4teeicy4bib4ulit3egoteg1rcy5noteg3us1latbsin41tellbsen4d4abr1d2acdach43tels3dact4b1s2sky3ld4aled4alg4bry_dam5a3damed3amida5mu3dangs5keybrum4d3ard5darms3ketbros4tem3as5kardat4ub4roa4teme4tenet5enm4tenob2ridteo5l4bre_5sivad3dlid3dyite3pe4s1ivde5awde4bisi4teb2ranbram44sismde1cr4dectded3i4sishs1is24bralde4gude3iosi4prtep5i4sio_1sio45sinkde5lo1d4emsin3is2ine4boxy1silibow3ssif5f4demybous4den4d4dened3enh4sidssi4de4sid_3bourde3oddeo3ldeon2si4cu5terd3sicc4s1ibde2pu5botishys44shu4d4eres3hon5shipsh3io1de
rider3k3dermsh5etsh1er4shab1teri2s1g4der3s5deru4des_de3sa5descbor4nter5k3terrdes4isexo23borides1psewo4de3sq2t2es5seum1de1t4tes_de5thde2tise5sh4ses_bor3d3septsep3atesi4t3esqdfol4tes4tteti4dgel4d4genbon4ebon4cdhot4bol4tbol3itet1rdi2ad3diarbol4e4d1ibd1ic_3sensdi4cedi3chd5iclsen5g1dictsem4osem2i5self4sele4boke5selasei3gd4ifo2boid3seedbod5i5dilldilo4di3luse4dabo5amdi1mi2d1indin4ese2cosec4a3di1odio4csea3wdip5t3diredi3riseas4di4s1d4iscs4eamb3lis3dissbli2q2s1d22s1cud3itos4coi2ditybli3oscof44blikscid5dix4i3bler4the_b3lan5dlefblag43dlewdlin45blac4b5k4bi5ve4d1n24bity4thea4thed4sceidog4abis4od4ol_s4ced5bismscav3sca2pd4ols5dom_1thei3theobi3ousbe4sdo5mos4bei4donybio5mbio3l4dor_dor4mdort41bi2ot4hersavi2dot1asaur52dousd4own4thi_th5lo2thm25binad3ral3dramdran4d4rassat1u3dreldres4sa2tedri4ed4rifs2a1td4romsas3s3sas_4d1s2th4mi3thotds4mi1th2rb2iledt5hobigu3bi5gadu1at5thurduch5sar5sdu4cosap3rbid5idu5en2santdu5indul3cd3uledul4lsan3adun4asamp43b2iddu3pl5durod5usesam5o5thymbi4b1dver2be3trsa3lube3sl3sale2bes_be1s2dy5ar5dy4e3thyrber5sdyll35dymi5berrdys3pberl4thys42beree1actbe5nuea5cue5addbe1neead1i1ti2ati3abben4deal3abel4tsad5osad5is3actean5i2t3ibsac4qe3appear3a5sacks3abl2belebe3labe3gube5grryp5arym4bry4goeas4t5rygmry5erbe3gobe4durvi4tr3veyr3vetr3vene4atube4doeav5ibed2it3ic_eaz5ibe3daebar43becube3caru3tirus4pe2beneb5et4bease5bile4bine4bisbdi4ve4bosrur4ibde4beb1rat2icie4bucru3putic1ut3id_run4trun4ge5camrun2eec3atr4umib3blir4umeech3ie4cibeci4ft4ida2b1b2ru3in3tidirue4lt5idsru4cerub3rr4ube1tif2ec1ror4tusti3fert5sirto5lr1t4oec1ulrt3li4tiffr2tize2dat3tigie4dede5dehrt3ivr2tinrth2ir5teue3deve5dew5barsr5tetr1ted4tigmr3tarrta4grt3abed1itedi2v5tigued3liedor4e4doxed1ror4suse2dulbar4nrs5liee4cers3ivee4doti4kabar4d5barbr4sitba4p1r3sioeem3ib4ansee4par4sileesi4ee3tot4illr5sieefal4rs3ibr3shir3sha5bangr3setb4anee4fugrsel4egel3egi5ae4gibe3glaeg3leeg4mir3secr3seat4ilte5gurban4abam4abal5utim1abal3abag4a5eidobaen43backr4sare4in_e3ince2inee1ingein5ir2sanei4p4eir3oazz4leis3ir2saleith4azyg4r4sagaz5eeaz3ar2r1s2ek3enek5
isayth4e4lace5ladr3rymelam4r3ryi3tinnay5sirro4trrog5rrob3ay5larric4ax2idrrhe3rre2lele3orrap4el1ere1lesrra4h4r1r44tinst4intrpre4el5exrp5ise1lierph5ee3limav1isti3ocrp3atav3ige3livavas3r4oute3loae3locroul35rouero3tue2logro1te4rossr4osa4roreel3soror5dav5arelu4melus42t1ise5lyi3elytr4opr4rop_emar4tis4c5root1roomem5bie1me4e4meee4mele3mem3tissro1noro3murom4pe4miee2migro3lyro3laroid3e3mioro3ictis2te4miuro3gnro1fero3doava4ge2moge4moiro3cuem5om4emon5roccro5bre2morro4beav4abr5nute5mozrnuc4au3thr5nogr3noc3titlem3ume5muten3ace4nalrn3izrni5vr1nisrn3inr3nicrn5ibr5niaenct42t1ivr3neyr3netr3nelaus5pene5den3eern5are5nepe2nerr5nadr3nacrn3abt3iveen1et4aus_rmol4e3newen3gien3icr3mocrmil5en5inr5migaur4o5tleben3oieno2mrm4ieenov3aun3dr2micen3sprme2arm4asr2malr5madr3mac3tlefen2tor4litau3marlat33tlem5tlenen3uaen3ufen3uren5ut5enwa5tlewe4oche4odaaul4taul3ir3keyr3ketrk1ere5olutlin4eon4ae3onteop4te1or1r5kaseor3eeor5oeo1s2eo4toauc3oep4alaub5iepa4t4a2tyr2i4vr2ispris4cep5extmet2eph4ie2pige5pla2t3n2ri5orri4oprio4gatu4mrin4sr4inorin4e4rimse1p4u4rimmr4imbri2ma4rim_at1ulr4ileri2esera4gera4lri3erri5elrid4e2ricur4icl2riceri3boer3be2r2ib2a2tuer3cher3cltoas4ri5apri3am4toccat1ri4ered3r2hyrhos4tod4irgu5frg5lier3enr3gerr3geor5geee3reqer3erere4sa4trergal4r4gagat3rarfu4meret42a2tra5tozatos4ere4ver3exreur4er3glre3unre3tur3esq2res_er2ider3ierere4rer4aer3into5dore5phre1pe3reos3reogre3oce3river5iza3too4atoner3mer4enirene2rena4r3empr5em_re1le4ero_re1lam5ordreit3re3isre1inre3if2atolre2fe3reerree3mre1drre1de2r4ed4atogeru4beru5dre3cure3ce3reavr5eautol4ltolu5es5ames5an4atiure3agre3afr4ea_to5lye3seatom4be5seeat1itese4lr4dolrd3lie1shie5shurdi3ord2inr5digr4dier4desr2dares3imes3inr5dame4sitrc5titon4er5clor4clees4od3tonnrcis2rcil4eso3pe1sorr2cesrca4ston3ses4plr4bumr2bosrbit1r2binrbic4top4er4beses2sor3belrbe5ca4timrbar3e2stirb1anr4baga2tif4toreest4rrawn4tor5pra3sor4asktor4qr2aseras3cati2crare2eta3p4rarcran2tet4asra3mur5amnet5ayra3lyra3grra4de3tos_eter2r2acurac4aetex4e2th1r2abo2etia5rabera3bae5timet3inath5re3tir5quireti4u1quet2qu
e_e2ton4quar5quaktos4ttot5uath3ipyr3etou4fet1ri5tourt3ousath3aet1ro4a2that5etetud4pu3tre4tumet4wetra5q3tray4ater4tre_4trede3urgeur5itren4pur3cpur5beut3ipu3pipun2tpun3i3puncev3atpun4aeve4n4trewpum4op4u4mpu5ere4vese1viapuch4e2vict2rieevid3ev5igpu5be2trilt2rit4trixe4viuevoc3p5tomp3tilata3st4rode4wage5wayew1erata3pew5ieew1inp5tiee3witatam4ex5icpt4ictro5ft2rotey4as2a2taey3s2p5tetp1tedez5ieas5uras4unfab4ip2tarfact2p4tan2f3agp4tad5falopt3abtro1v3psyc3troypso3mt4rucfar3itru3i2t4rytrys42asta3feast4silfeb5ras3ph2fed1as5orfe1lifem3i2t1t4p3sacf5enias4loas4la3feropro1l4pro_3ferrfer3v2fes_priv24priopren3aski43prempre1dfet4ot3tabpreb3as5iva3sit4pre_f5feta5siof5fiaf3ficf5fieffil3prar4ff4lepra5dffoc3prac1as3int5tanppi4ct5tast3tedfib5u4fic_ppet33fici4ficsppar34p1p2fiel4asep4p5oxi1fi2l4asedfin2apo1tefind3fin2ef1ing3p4os3portpor3pf3itapo4paas2crt3tlifle2s2ponyflin4t5toip4o2nasan2pom4eas4afa5ryta3ryot5torar3umt3tospo3caar2thar3soar2rhar4pupnos4tu5bufor5bar3oxtu5en5formplu2m2plesaro4ntu4is3plen3plegfrar44ple_fre4sar3odfruc42tum_3tumi4tumsf1tedtun4aft5es2p3k2p2itutu4netur4dtur4npis2sfug4ap4iscfun2gp4is_fur3npir4tfus5oar3guar5ghpi4pegadi4pip4at3wa4ar3en3gale3pi1op4innpin4e3galot3wit5pilo3piletwon4pig3n5tychpict4g5arcg4arepi4crpi3co4picagar5p5garr1ga4sgas5igas3o3piarar4bl3phyltyl5ig4at_2phy_phu5ity5mig4attgat5ugaud5ga5zaar3baara3va3rau5geal3gean2ge4d3gedi5gednar1at3type4gelege4li1tyr13phrage4lu2gelygem3i5gemoara3mph3ou3phorgen3oa3rajt5ziat5zie4gereph1is2ges_5gessphi4nua3ciget3aara2ga5quia5punua5lu1philg3ger4phic3phibg3gligglu3g5glyph3etg4grouan4og5haiuar3auar2dg4hosuar3iap5lia5pirph2angi4atu1b2igi5coap3in4phaeub5loub3ragi4orgi4otaph3igi5pag4i4s5gis_gi2t15gituu1c2aa5peug3laru5chrglec43glerap3alpe4wag4leypet3rpe2tia1pacaol3iglom34glopa5nyian5yap4ery3glyp2g1m4a5nuta3nurg4nabper3vp4eri4pere5percpe5ongn5eegn3eru4comg4niapen5upel5v4pelean3uluco5tgno4suc2trant4ruc3ubuc5ulu5cumgo4etgo4geu5dacg5oidgo3isgo2me5gonnpe2duud1algoph44gor_5gorg4gorsg4oryud5epgos4t1anth3pedsg1ousan2teu4derudev4grab43gram3pe
digra2pudi3ogril43pedeu5doigro4gg5rongrop4ud5onan3scgru5ipe4coan5otan2osanor3g4stiu5doran2oeg4u2agu5ab5guan4annyg5uatan5no5gueu4aniuuen4ogu2magu4mi4anigpawk4uer3agur4ngur4u4gurypau3pani3fan3icues4san3euan4eagyn5ouga4cug2niug3uluhem3ui3alp5atohae3opas1t1p4ashag5uha5ichais4par3luid5ouil4apa3pypap3uhan2gpa3pepa4pahan4tpan3iha4pehap3lhar1ahar5bhar4dpan1ep4alspa3lohar3opain2paes4pad4rhat5ouil4to3zygozo5ihav5oana5kuin4san3aeuint4amyl5am3ului5pruis4t1head3hearui3vou4laba3mon4ulacu5lathe3doheek4ul4bohe3isul3caul4ch4uleaow5slow5shu5leehem1aow5in3amidow5hahem4pow1elhe3orulet4h1er_owd3lher2bowd4io5wayow3anow3ago1vish5erho5varouv5ah1erlouss42ouseh1ersoun2dul4evami2cul2fahet3ioul4tul4iaheum3ou5gihe4v4hev5ihex5oa3men3ambuu5lomhi4aram1atou5gaul4poh4iclh5ie_h1ierou3eth1iesama4gh3ifyhig4ohi5kaa5madoud5iou5coou5caa5lynhin4dou5brul1v45ou3aalv5uh2ins4o1trh4ioral1vahip3lum3amhir4ro4touhit4ahiv5aumar4u5masalu3bh3leth1l2ihli4aum2bio1t2oot4iv2h1n2o5tiaal3phho3anho4cou4micho5duho5epo4tedhold1o3taxo3tapot3ama5lowh2o4nos1uru4mos4ostaos4saos1pihon1o1hoodhoo5rh4opea4louo5sono5skeh4orno4sisos1inos5ifhosi4o3siaalos4os5eual1ora3looo2seta3lomoser4hr5erhres4um4paos5eohrim4h5rith3rodose5ga5loeo3secumpt4un5abun4aeht5aght5eeo4scio2schos4ceos4caht5eoht5esun2ce4aliuosar5un3doos3alosa5iory5phun4chunk4hun4thur3ior4unu1nicun4ie4or1uun3inal1in5aligal3ifal1iduni5por4schy1pehy3phuni1vor1ouun3iz2i1a2ia4blo5rooorm1ii2achiac3oa2letork5a5origa1leoun3kni2ag4ia3gnor3ifia3graleg4a3lec4ori_al3chor5gn4ialnor4fria5lyi5ambia3me5orexi3anti5apeia3phi2ardore4va5lavor3eiore3giat4uore3fal3atun3s4un5shun2tiibio4or4duib5lia1laei4bonibor4or4chi5bouib1riun3usoram4ic3acor5ali4calic1an2icariccu4akel4i5ceoa5ismich4io5raiora4g4icini5cioais1iic4lo2i2coico3cair3sair5pi5copop2ta2i1cri4crii4crui4cry1op1top5soopre4air5aop2plic3umopon4i5cut2i1cyuo3deain5oi5dayide4mo4poiain3iu1pato1phyid3ifi5digi5dili3dimo4pheo1phaidir4op1ero5peco4pabidi4vid3liid3olail3oai5guid3owu5peeid5riid3ulaid4aa5hoo2ieg2ie3gauper3i5ellahar22i1enien2da1h2aoo4sei2erio3o
pt4iernier2oi4erti3escagru5oon3iag3ri2i1eti4et_oo4leag5otook3iiev3au5pidiev3o4ag1nagli4if4fau5pola5giao5nuson5urifi4difi4n4i2fla5gheifoc5ont4rupre4af5tai3gadaev3a3igaraeth4i3geraet4aono3saes3ton5oionk4si3gonig1orig3oto1nioo5nigon3ifig1urae5siae3on4ura_aeco34uraead3umura2gik5anike4bi2l3aila4gon4id4a2duil4axil5dril4dui3lenon4guuras5on1eto3neoon1ee4oned4oneaad1owon5dyon3dril1ina3dos4onauon3aiil5iqona4do2mouil4moi5lonil3ouilth4il2trad3olil5uli5lumo4moi4adoi4ilymima4cim2agomni3im1alim5amom2naomme4om2itomil44adoeomi2co3mia3adjuome4gurc3ai5mogi3monim5ooome4dom4beo3mato2malo2macim5primpu4im1ulim5umin3abo4mabur4duadi4p4olytina4lol1ouin5amin3anin3apo3losol1or4olocur3eain3auin4aw4adilol3mia5difolle2ol2itolis4o5lifoli2eo1lia4inea4inedin5eeo3leuol1erine4so3lepo3leo4ineuinev5ol5chol4an4infu4ingaola4c4ingeur5ee4ingiad4haur1er4ingo4inguoith44adeeada3v4inico3isma5daiur3faac2too3inguril4ur1m4ac3ry4ino_in3oioil5i4inos4acou4oideo2i4d4acosurn5soi5chinse2o3ic_aco3din3si5insk4aco_ac3lio3ho4ack5aohab34acitacif4in5ulin5umin3unin3ura4cicuro4do5gyrur5oturph4iod5our3shio3gr4i1olio3maog4shio3moi5opeio3phi5opoiop4sa5cato4gro4ioreo2grio4got4iorlior4nio3sci3osei3osii4osoog2naur5taiot4aio5tho4gioio5tri4otyur1teo5geyac3alurth2ip3alipap4ogen1o3gasip1ato3gamurti4ur4vaofun4iphi4i4phuip3idi5pilip3ino4fulipir4ip5isab1uloflu42abs_ip3lou3sadi4pogus3agi4pomipon3i4powip2plab3omip4reoet4rip1uli5putus3alabli4i3quaab3laus4apoet3iira4co4et_ir4agus3atoes3t4abio2abiniray4ird3iire3air3ecir5eeirel4a3bieires4oelo4ab1icoe5icir4ima3bet5irizush5aoe5cuir5olir3omusil52abe4ir5taoe4biabay4us4pais5ado5dytis1alis3amis1anis3aris5av_za5ri2s3cod3ul_xy3lod5ruo3drouss4eod3liis2er5odizod5it4iseuod4ilodes4o5degode4co5cyt2isiais5icis3ie4isim_vo1c4isisis4keus1troc5uo2ismais1onocum4iso5pu5teooc1to5ispr2is1soc2te_vi2socre3u3tieiss4o4istao2cleu3tioo5chuoch4e4istho4cea4istloc5ago3cadis1tro4cab4istyi5sulis3urut3leutli4it5abita4c4itaiit3am_vec5it4asit3at_ur4oit3eeo3busob3ul_ura4_up3lo3braith5io5botith3rithy52itiao5bolob3ocit1ieit3ig4itim_un
5uob1lio3blaob3iti5tiqut5smit3ivit4liit5lo4ito_it5ol2itonit1ou_un5sobe4lu4tul_un3goat5aoap5ioan4t4itueit1ulit1urit3us2i1u2_un3eiur5euven3oal4iiv1ati4vedu5vinoad5io3acto5ace_ul4luy5er2v3abives4iv3eti4vieiv3ifnyth4va1cavacu1iv1itva4geivoc3vag5rv1al_1vale_tor1vali25valu4izahiz3i2_til4iz5oivam4i_tho4va5mo5vannnwom4jac3ujag5u_te4mja5lonwin44vasev4at_jeop34vatuvect4_ta4m4velev1ellve1nejill55jis_4venu5ve3ojoc5ojoc5ujol4e_sis35verbju1di4ves__ses1ju3ninvi4tjut3a_se1qk4abinvel3kach4k3a4gkais5vi1b4vi4ca5vicuvign3vil3i5vimekar4i1kas_kaur42v1invin2evint4kcom43vi1oviol3kdol5vi5omke5dak5ede_rit2_rin4ken4dkeno4kep5tker5ak4erenu1trker4jker5okes4iket5anu4to5vi3pkfur4_re3w_re5uvire4kilo3vir3uk2in_3kind3nunc5numik3ingkin4ik2inskir3mkir4rv3ism3kis_k1ishkit5cvit2avit1rk5kervi3tu_re5ok5leak3lerk3let_re1mv3ity_re1ivi5zovolv41know3vorc4voreko5miko5pe3vorok5ro4_po2pv5ra4vrot4ks2miv3ure_pi2ev5verwag3owais4w3al_w3alswar4fwass4nu1men3ult5labrwas4tla2can4ulowa1tela4chla2conu4isw4bonla3cula4del5admw5die_out1nug4anu3enlag3r5lah4nud5i_oth54lale_osi4_or2o_or4ilam1ol5amu_ore4lan2d_or3dn5turntub5n3tua3weedweir4n5topwel3ilapi4n3tomn1t2o_op2i_on4ent3izla4tenti3pn3tign1tient4ibwent45laur_ome2_ol4d_of5twest3_oed5l4bit_ob3lw5hidl2catwid4elcen4n1thelch4el3darl3dedl3dehwi5ern4teol5dew_no4cl3dien3teln4tecwim2pld5li_ni4cwin2ecen3int1atnt1aln3swale3cawl1ernsta4_na5kle5drleg1an3s2t3leggn5sonleg3ons3ivwl4iensi2tlel5olelu5n3sion3sien3sid5lemml3emnle2mon4sicns3ibwon2tn3sh2n5seule1nen2seslen3on5seclen5ule3onleo4swoun4wp5inn4scun2sco_mis1_mi4enre3mnre4ix4ach4les_x4adenpri4x3aggnpos4npla4npil4leur5x3amil3eva5levexan5dle4wil5exaxano4lf5id_lyo3lf3on_lub3l4gall4gemlgi4al4gidl4goixas5pxcav3now3llias4lib1rl1ic_5lich_lo2pnove2nou5v2nousli4cul3ida3nounn4oug3lieul4ifel4ifoxcor5_li4p3notenot1a_li3oxec3r1l4illil4ilim2bno3splim4pnos4on4os_lin4dl4inenor4tn4oronop5i5nood4noneno2mo1nomi3linqnol4i3liogli4ollio3mliot4li3ou5liphlipt5x5edlx5edn_le2pl4iskno3la_le4ml2it_n5ol_no4fa3lithnoe4c3litrlit4uxer4gn4odyno4dinob4ln5obilk5atxer3on5n
yi_ki4ex3ia_nnov3x4iasl5lasl4lawl5lebl1lecl1legl3leil1lellle5ml1lenl3lepl3leul3lev_is4o_is4c_ir3rx5ige_in3tllic4nlet4_in3ol5lie4n1l2l2linnk5ilnk5ifn3keyl5liolli5v_in2ixim3ank5ar_in3dllo2ql4lovnjam2_im5b_il4i_ig1n_idi2llun4l5lyal3lycl3lygl3lyhl3lyil5lymx4ime_hov3_ho2ll4mer_hi3bl5mipni3vox4it__he4ilneo4x4its5loadniv4ax4ode_hab2ni4ten5iss2locynis4onis4l_gos3n4isk4loi_lo5milom4mn4is_lon4expel43nipuni1ou5nioln4inu5ninnnin4jn4imelop4en3im1l3opm1lo1qnil4ax4tednik5e3nignn3igml4os_lo1soloss4_ga4mnift4nif4flo5tu5louplp1atlp3erxtre4l5phe_fo3cl2phol3piel3pitxur4b1y2ar_eye3_ex3a3yardl5samls5an4nicllsi4mls4isyas4i_eur4l1s2tni3ba3niac_es3tl5tar_es3pl4teiyca5mlth3inhyd5y3choltin4lti3tycom4lt4ory2cosnhab3_er2al4tusyder4_epi1luch4_eos5n2gumlu4cu_ent2lu1enlu5er_en3slu4ityel5olu4mo5lumpn4gry_en5c5lune_emp4n5gic_em3by5ettlusk5luss4_el2in5geen4gae_ei5rlut5r_ei3dygi5a_ec3t_eco3l4vorygo4i_dys3_du4c_do4eyl3osly4calyc4lyl5ouy1me4news3_de4wly4pay3meny5metnet1ry5miaym5inymot4yn4cim4acanet3an1est1nessn1escmact44mad_4mada4madsma4ge5magn2nes_yn3erma5ho3ma4i4mai_maid3_der2ner2vner5oyni4c_de1mneon4m3algneo3ln3end4n1enne2moyoun4n4ely2neleyp5alneis4man3a5negune3goneg3a3nedi_dav5m4ansne2coyper3m3aphy4petne4cl5neckn3earyph4en3dyind2wemar3vn4dunndu4bn2doundor4n5docnd1lin3diem4at_n1dicnd4hin5deznde4snde4ln1dedn3deayph3in3damm4atsn3daly4p1iy4poxyp5riyp4siypt3am5becn4cuny3ragm4besyr3atm2bicnct2oyr3icm4bisy5rigncoc4n1c2lm3blimbru4mbu3lmbur4yr3is_can1ys5agys5atmea5gn4cifme4bame4biy3s2c4med_n4cicn3chun3chon3chan5ceyme4dom5edy_bre2n5cetn3cer4melen1c2anbit4nbet4mel4tnbe4n_bov4ys1icys3in3men_2menaysi4o3nautnaus3me1nenat4rnati45meogys4sonas3s4merenas5p2me2snas5iys4tomes5qyz5er1me2tnam4nmet1e3nameza4bina3lyn5algmet3o_aus5_au3b_at3t_at3rza4tena5ivmi3co5nailm4ictzen4an5agom4idina4ginag4ami5fimig5an2ae_mi2gr_as4qmi5kaz5engm3ilanadi4nach4zer5a3millmi5lomil4t3m2immim5iz3et4_ari4_ar4e_ar5d5zic4_ap4i5my3c_any5z3ing3zlemz3ler_an3smu4sem5uncm2is_m4iscmi4semuff4zo3anmsol43zoo2_and2zo3olzo3onzo5op4mity_am2i_al1k_air3_ag5nmlu
n42m1m2_ag4amp5trmp3tompov5mpo2tmmig3_af3tmmis3mmob3m5mocmmor3mp3is4m1n2mnif4m4ninmni5omnis4mno5l_af3f_ae5d_ad3o_ad3em3pirmp1inmo4gom5pigm5oirmok4imol3amp5idz3zarm4phlmo3lyz5zasm4phe_ach4mona4z3ziemon1gmo4no_ace45most_ab4imo3spmop4t3morpz5zot",6:"reit4i_ab3olmo5rel3moriam5orizmor5onm3orab3morse_acet3_aer3i_al5immo3sta2m1ous_al3le4monedm4pancm4pantmpath3_am5ar_am3pemper3izo5oti_am3phmo4mis_ana3b_ana3s_an5damog5rimp3ily_an4el_an4enmmut3ammin3u_an4glmmet4e_ant3am3medizing5imman4d_ar5abm5itanm3ists_ar5apmsel5fm3ist_5missimis3hamuck4e4misemmul1t2_ar4cimu5niomun3ismus5comirab4mus5kemu3til_at5ar1m4intmin3olm4initmin5ie_bas4i_be3di5myst4_be3lo_be5sm5min4d_bi4er_bo3lo_ca3de_cam5inac4te_cam3oyr5olona4d4amil4adnad4opyr3i4t_car4imid5onn4agen_ca4timid4inmi4cus_cer4imi3cul3micromi4cinmet3ri4naledyp5syfn4aliameti4cmeth4i4metedmeta3tna5nas_cit4anan4ta_co5itnan4to_co3pa4n4ard_co3ru_co3simes5enmer4iam5erannas5tenat5alna5tatn4ateena3thenath4l5mentsn4ati_nat5icn4ato_na3tomna4tosy4peroy4periy5peremend5oyoung5naut3imen4agna5vel4m5emeyo4gisnbeau4_de3linbene4mel3on_de3nomel5een4cal_yn4golncel4i_de3ra_de3rimega5tncer4en4ces_yn5ast3medityn5ap4nch4ie4medieynand5ynago43mediaym4phame5and_de3vem5blern4cles_dia3s_di4atmb5ist_din4anc4tin_dio5cm5bil5m4beryncu4lo_east5_ed5emncus4tmbat4t_elu5sn3da4c3m4attn4dalema3topnd3ancmat5omma3tognde3ciyes5tey3est__em5innd3enc_em5pyn3derlm4atit_en5tay4drouma3term4atenndic5undid5aydro5snd5ilynd4inend3ise_epi3d_er4i4nd5itynd3ler_er4o2_eros43mas1ty4collnd5ourndrag5ndram4n5dronmassi4y4colima3sonyclam4mar5rima3roone3aloma5ronne2b3umar5ol5maran_erot3_er4rima5nilych5isne4du4manic4man3dr_eth3e3m4an__eval3ne5lianeli4g_far4imal4limal3le_fen4dm3alismal3efmal5ed5male24nered_fin3gxtra3vner4r5mal3apxtra5d2mago4ma4cisne3sia5machy_fu5ganes3trmac3adnet3icne4toglys5erxtern3neut5rnev5erlypt5olymph5n4eys_lyc5osl5vet4xter3ixpoun4nfran3lv5atelu5tocxpo5n2_ge3ron3gerin5gerolut5an3lur3olu3oringio4gn5glemn3glien5gliol3unta_go3nolu2m5uxo4matluc5ralu2c5o_hama5l3t4ivltim4alti4ciltern3lt5antl
4tangltan3en4icabni4cen_hem5anict5a_hy3loni4diol3phinni4ersximet4lot5atnif5ti_ico3s_in3e2loros4lo5rof_is4li_iso5ml4ored_ka5ro_kin3e5nimetn4inesl3onizl3onisloni4e3lonia_lab4olo5neyl5onellon4allo5gan3lo3drl3odis_la4me_lan5ixen4opnitch4loc5ulni3thon4itosni5tra_lep5rni3trinit4urloc3al5lob3al2m3odnivoc4niz5enlm3ing_lig3anjur5illoc5ulloc3an5kerol3linel3linal5lin__loc3anland5lli5col4liclllib4e_loph3_mac5ulli4anlli5amxa5met_math5llact4nni3killa4balk3erslk3er_lkal5ono5billiv5id_ment4_mi3gr_mirk4liv3erl5ivat5litia5liternois5il3it5a5lisselint5inom3al3lingu5lingtling3i3nonicw5sterws5ingnora4tnor5dinor4ianor4isnor3ma_mi5to_mo3bil4inasl4ina_wotch4word5ili5ger_mon3a5lidifl4idarlict4o_mu3ninova4l5licionov3el_mu3sili4cienow5erli4ani_myth3_nari4le5trenpoin4npo5lale5tra3les4sle3scon4quefler3otleros4ler3om_nast4le5rigl4eric3w4isens3cotle5recwin4tr_nec3tle5nielen4dolend4e_nom3ol5endalem5onn5sickl5emizlem3isns5ifins3ing_nos3tn3s2is4leledle3gransolu4le4ginn4soren4soryn3spirl3egan_obed5nstil4le5chansur4e_ob3elntab4unt3agew5est__oe5sont5and_om5el_on4cewel4liweliz4nt3ast_opt5ant5athnt3ati_or3eo3leaguld3ish_pal5in4tee_n4teesld4ine_pa5tald3estn4ter_n3terin5tern_pecu3war4tel5deral4cerenther5_ped3elav5atlat5usn4tic_ward5r_pend4n4tics_pep3tn3tid4_pi3la_plic4_plos4_po3lan5tillnt3ing_pop5lvo3tar_pur4rn4tis_nt3ismnt3istvo5raclat5al4laredlar5delar5anntoni4lan4tr_re3cantra3dnt3ralviv5orn3tratviv5alnt3rilv5itien5trymlan3etlan4er3landsvi5telland3i3land_lan3atlam4ievi3tal2v5istla4ic_la4gisla3gerlac5on5visiola5cerla5ceolabel4vi5ridlab5ar_re3ta5numerkin5et_rib5anu3tatn5utivkey4wok5erelkal4iska5limk2a5bunven4enven5o_ros3ajuscu4_sac5rjel5laja5panja2c5oi5vorevin5ta_sal4inym5itv5iniz5vinit3vinciiv3erii4ver_iv5elsoad5ervin4aciv5el_oak5ero3alesiv5ancoal5ino5alitit5uar_sanc5oar5eroar4se_sap5ait4titoat5eeoat5eri4tric_sa3vo4i5titob3ing2obi3o_sci3e4itio_it4insit4in_it5icuiti4coi5tholitha5lobrom4it3erait3entit3enci3tectit4ana3istry_sea3si4s1to5vider_sect4oc5ato4o3ce25vict2ocen5ovice3r_se3groch5ino3chon_sen3tvi4atroci
3aboci4al5verseis4taliss4ivis5sanis4saliss5adi3s2phocu4luver4neislun4ocuss4ver3m4ocut5ris3incis5horocyt5ood3al_ish3op4ishioode4gao5dendo3dentish5eeod3icao4d1ieod3igais3harod1is2v5eriei2s3etis5ere4is3enis3ellod5olood5ousise5cr4i1secisci5cver3eiver5eaven4tris5chiis3agevent5oir5teeir5ochve5niair4is_ir2i4do3elecoelli4ir5essoe3o4pire5liven4doi5rasoven4alvel3liir4ae_ir4abiv4ellaip3plii4poliip3linip4itiip1i4tip4ine_su5daiphen3i1ph2ei3pendog5ar5v3eleripar3oi4oursi4our_iot5icio5staogoni45ioriz4ioritiora4mvel3atiod3i4ioact4_sul3tintu5m_tar5oin3til_tect45vateein4tee_tel5avast3av5a4sovar4isin3osiin5osei3nos_oi5ki5oil3eri5noleoin3de4vantlvanta4oin4tr_ter4pin3ionin4iciin5ia_oit4aling3um4ingliok4ine4ingleing5hain5galo4lacko5laliinfol4olan5dol5ast_thol45val4vole2c4ol5eciol5efiine5teole4onin3esi4in5eoo3lestin5egain5drool3icao3lice_ti5niol5ickol3icsol5id_va5lieo3lier_tri3dinde3tvager4oli5goo5linaol3ingoli5osol5ip4indes5inde5pin5darollim34vagedol4lyi3vag3ava5ceo4inataol3oido4lona_tro4vi3nas_in4ars_turb44ol1ubo3lumi_turi4ol3us_oly3phin3airin5aglin4ado4inaceimpot5im5pieo4maneomast4_tu5te_tu3toi3mos_im5mesomeg5aome3liom3enaomen4to3meriim5inoim4inei3m2ieomic5rom4ie_imat5uom4inyomiss4uv5eri_un5cei5m2asim3ageil5ureomoli3o2mo4nom5onyo4mos__un5chilit5uom5pil_un3d2il4iteil5ippo5nas__uni3c_uni3o4iliou_un3k4oncat3on4cho_un3t4u4t1raon3deru4to5sili4feili4eri5lienonec4ri3lici_ve5loon5ellil3iaron3essil3ia_ong3atilesi45u5tiz4o1niaon5iar2oni4conic5aut3istut5ismon3iesigu5iti4g5roi5gretigno5m4onneson5odiign5izono4miu5tiniut3ingo5nota_ver3nig3andu4tereon4ter_vis3ionton5if5teeon4treif5icsut5eniutch4eif3ic_u3taneoof3eriev3erook3eri5eutiiet3ieool5iei3est_i1es2ties3eloop4ieieri4ni3eresus5uri4idomioot3erooz5eridol3ausur4eo5paliopa5raopath5id4istopens4id1is43operaus4treidios4_vi5sooph4ieo5philop5holi3dicuus1to4iderm5op3iesop5ingo3p2itid3eraust3ilid3encopol3ii5cun4op5onyop5oriopoun4o2p5ovicu4luop5plioprac4op3ranict5icopro4lop5ropic4terust5igust4icicon3ous5tanic5olaor5adoich5olus3tacic5ado4oralsib3utaoran3eab5ar
eorb3ini4boseorch3iibios4ib3eraor5eadore5arore5caab5beri5atomia5theoreo5lor3escore3shor3essusk5eru4s1inor5ett4iaritianch5i2a3loial5lii3alitab3erdor3ia_4orianori4cius5ianorien4ab3erria5demori5gaori4no4orio_or5ion4oriosia5crii2ac2rus4canor3n4a5ornisor3nitor3oneabi5onor5oseor5osohys3teorrel3orres3hyol5ior4seyor4stihyl5enort3anort3atort3erab3itaor3thior4thror4titort3izor4toror5traort3reh4warthu3siahu4minhu5merhu4matht4ineht4fooht3ensht3eniab4ituht3en_ab3otah3rym3osec3uhrom4ios5encosens43abouthre5maabu4loab3useho4tonosi4alosi4anos5ideo3sierhort5hho5roghorn5ihor5etab3usio3sophos3opoho2p5ro3specho5niohong3ioss5aros4sithon3eyur3theos4taros5teeos5tenac5ablur5tesos3tilac5ardost3orho5neuhon5emhom5inot3a4gurs3orho4magach5alho5lysurs5ero5ta5vurs5alhol3aroter4muroti4ho3donachro4ur5o4mach5urac5onro5thorurn3ero5tillurn3alh5micao3tivao5tiviur5lieo5toneo4tornhirr5ihio5looturi4oty3lehi5noph5inizhi5nieh2in2ehimos4hi5merhi5ma4h3ifi4url5erhi4cinur5ionur4iliur4ie_ac2t5roult5ih4et3ahes3trh5erwaound5aac5uatur3ettoun3troup5liour3erou5sanh4eron5ousiaher5omur1e2tur3ersova3lead5eni4ovatiad3icao4ver_over3bover3sov4eteadi4opadis4iovis5oo2v5oshere3ohere3aherb3iherb3aher4ashende5ur5diehe5mopa3ditihemis4he3menowi5neh3el3ohel4lihe5liuhe3lioh5elinhe5lat5admithe5delhec3t4adram4heast5ad3ulahdeac5ae4cithavel4ura4cipac4tepa5douhas4tehar4tipa3gan4pagataed5isu5quet4pairmpa5lanpal3inag4ariharge4pan5ac4agerihant3ah5anizh1ani4agi4asham5an4aginopara5sup3ingpa3rocpa3rolpar5onhagi3oag3onihaged5agor4apa3terpati4naha5raaid5erail3erhadi4epaul5egust5apa5vilg4uredg4uraspaw5kigui5ta5guit43guardaim5erai5neagrum4bpec4tugru3en5ped3agrim3a4grameped3isgour4igo5noma3ing_5gnorig4ni2ope5leogn4in_pen4at5p4encu5orospen5drpen4ic3p4ennal5ablg2n3ingn5edlalact4until4g5natial5ais5gnathala3map3eronalc3atald5riun4nagg5nateglu5tiglu5tepes4s3ale5ma4g5lodun5ketpet3eng5lis4gli5ong4letrg4letoal3ibrali4cigin5gigi5ganun3istph5al_gi4alluni3sogh5eniph5esiggrav3ggi4a5al5icsg5gedlun4ine3germ4phi5thgeo3logen5ti4phobla5linigen5italin5ophos3pgen4dug
el5ligel4ing4atosg4ato_gat5ivgast3ral5ipegasol5ga5rotp5icalu3n2ergar3eeg5antsgan4trp4iestpi5etip5ifieg5ant_un4dus4ganed4alis_gan5atpi3lotgam4blun4diepin5et3pingegali4a5p4insga5lenga4dosga4ciefu5tilpir5acfu3sil4furedfu4minundi4cpiss5aunde4tpis4trft4inefti4etf4ter_un3dedpla5noun4dalalk5ieun4as_al4lab4pled_frant4frag5aunabu44plism4plistal4lagu4n3a4umu4lofore3tfor4difor5ayfo5ramfon4deallig4fo4liefo1l4ifoeti42p5oidpois5iump5tepo4ly1poly3spoman5flum4iump5lipon4acpon4ceump3er3ponifpon5taf3licaf5iteepo5pleal3ogrpor3ea4poredpori4ffir2m1fin4nial3ous5fininpos1s2fi3nalu4moraumi4fyu2m5iffight5fier4cfid3enfi5delal5penp4pene4ficalumen4tal3tiep4pledp5plerp5pletal5uedal3uesffor3effoni4ff3linf2f3isal5ver2a1ly4fet4inaman5dul3siffet4ala3mas_fest5ipres3aulph3op3reseulph3i5pricipri4es4pri4mam5atuam4binfest3ap5riolpri4osul4litfess3o4privafer5ompro3boul4lispro4chfe5rocpron4aul4latam5elopro3r2pros4iu5litypro3thfer3ee4feredu5litipsal5tfemin5fea3tup5sin_fant3iul5ishpsul3i4fan3aul3ingfa5lonu3linefa2c3ufa3cetpt5arcez5ersp5tenapt5enn5pteryez5er_ex4on_ew5ishamen4dp2t3inpt4inep3tisep5tisievol5eevis5oam3eraev5ishev4ileam5erle4viabpudi4ce4veriam5icapu4laramic5rpu5lisu5lentu1len4a3miliev5eliev3astpun5gieva2p3eval5eev4abieu3tereu5teneudio5am5ilypu3tat5ulcheet3udeet3tere4trima5mis_et4riaul5ardet4ranetra5mamor5aetra5getor3iet3onaamort3am5ose3quera4quere4ques_et5olo5quinauit5er3quito4quitueti4naeti4gie3ticuuisti4ethyl3ra3bolamp3liuis3erampo5luin4taet5enia5nadian3agerag5ouuinc5u3raillra5ist4raliaet3eeret3atiet3ater4andian3aliran4dura5neeui3libra3niara3noiet5aryan3arca5nastan4conrant5orapol5rap5toet3arieta5merar3efand5auug3uraan5delet3al_es4ur5e2s3ulrass5aan5difug5lifra5tapra5tatrat5eurath4erat3ifan5ditra5tocan5eeran3ellra4tosra5tuirat5umrat3urrav5aian3ganrav3itestud4ra3ziees5tooe3stocangov4rb3alian4gures5taue5starest3anesta4brbel5orb3entes4siless5eeessar5rbic5uan5ifor5binee5s2pres5potan5ionrbu5t4es5pitrcant54anityr4celean3omaan4scoans3ilrcha3irch3alan4suran2t2ar3cheor4cherud3iedr4chinrch3isr3chites3onaan3
talan5tamrciz4ies3olae3s4mie3skinrcolo4rcrit5an4thies4itses4it_e5sion3anthrrd4an_es5iesr5de4lr3dens4anticrd5essrd5ianan4tiee5sickes5ic_rd3ingesi4anrd1is2rd5lere3sh4aes5encrd5ouse5seg5e3sectescut5esci5eant4ives5chees5canre5altre5ambre3anire5antre5ascreas3oeryth35erwauan4tusreb5ucre3calrec4ceer4vilan5tymre3chaan3um_an5umsap5aroerund5ert5izer4thire3disre4dolape5lireed5iu4cender4terer5tedre3finuccen5re5grare3grereg3rire3groreg3ulaph5emer4repaph5olaphyl3ero5stero5iser3oidern3it4reledre3liarel3icre5ligreli4qrel3liern3isrem5acap5icuub3linern3errem5ulu4bicuren5atr4endiap4ineren4eser4moirenic5ren4itub5blyre5num4eri2ta3planre5olare3olier4iscer3ioure4pereri4onrep5idre3pinre3plere4preeri4nauari4ner3iffre5reare3r2uapo3thre3scrre3selre3semre3serap5ronre5sitre3speapt5at4arabiara5bore5stu3retarre3tenar3agear5agire1t2ore5tonre3trare3trere5trier4ianer3ia_ergi3ver3ettrev3elrevi4ter3etser3et_ar3agoar3allaran4ger3esier5eseere5olr4geneeren4e5erende4remeer5elser5ellr5hel4rhe5oler5el_er3egrer3ealerdi4eerd5arerb5oser3batar5apaer5atuarb5etar4bidty4letri5cliri3colri5corri4craarb3lirid4aler3apyer3apier3aphera4doar4bularch5otwi5liri5gamaren5dri5l4aar5ettar3ev5ar5iff5tur5oequin4rima4gar4illrim3ate4putarimen4e3pur5ept3or5turitr4inetturf5iturb3aep5rimt4uranrins5itu5racep3rehtun5it5rioneepol3iepol3ari5p2ari5piear5iniep3licarm3erris4ise4peteris4paris4pear5mit4ristiri3tonr5it5rep5ertriv4alar3nalar3nisriv3enriv3il5ri5zoar5oidep5arceor4derk5atir5kellrk5enia5rotieol5ata5roucr3kiertud5ier5kin_r5kinsrks4meen4tusent5uptu5denr3l4icr3liner5linsen4tritu4binen5tiarma5cetuari4ent3arr4mancr4manor4marir4maryen4susars5alart5atarth4een4sumens5alrm4icar5m2iden3otyenit5ut4tupermin4erm3ingarth3rar5tizen5iere2n3euen4ettrmu3lie3nessen5esiener5var5un4as5conrn3ateas5cotrn5edlt3tlerr3nessrn5esttti3tuas3ectt5test3encept4tereen3as_rn4inee2n3arrn3isten4annash5ayem4preash5ilem5pesas5ilyempa5rask5erem3orras5ochrob3letstay4e3moniem3oloemod4uemo3birody4n4emnitem4maee4mitaem3ismem5ingem3inar4oledas4silassit5as4tatro5melro3mitas4ti
aas3tisemet4eron4ac4ronalas4titron5chron4dorong5ir5onmeem5ero4asto2as3traas4trit5roto4atabiem3anaro3peltro3spem3agor5opteel5tieelp5inel5opsrosi4aro5solel5op_5troopros4tiatar3aro3tatata3t4ro4terelo4dieloc3uelo5caat3eautri3me4roussell5izel4labrow3erelit4ttri3lie4li4seli3onr3pentrp5er_el3ingat3echr3pholrp3ingat5eerrpol3ar2p5ouele3vi3tricuelev3at5ricla5tel_e5lesstres4sele5phel3enor4reo4el5eni4e4ledelea5grricu4tre5prate5lerri4oseld3ertre4moat3entat3eraelast3el5ancel5age4traddeiv3ereit5ertra4co4atesse4ins_to3warehyd5re5g4oneg5nabefut5arsell5rs3er_rs3ersa3thene4fiteath3odr4shier5si2ato3temto5stra5thonrs3ingeem5eree2l1ieed3ere4d5urrstor4to3s4ped3ulo4a3tiator5oitor5ered3imeed5igrrt3ageto5radr4tareed5icsto4posr4tedlr3tel4r5tendrt3enito5piaa2t3in4atinaat5ingede3teton5earth3rir1t4icr4ticlr5tietr5tilar5tilltom5osrt5ilyedes3tr3tinart3ingr3titirti5tue4delee5dansrt5lete5culito4mogec4titrt5ridecti4cec4teratit3urtwis4e4cremtoma4nec3ratec5oroec3oratom3acat4iviec3lipruis5iecip5i4toledec5ath5at5odrun4clruncu42t3oidrun2d4e4caporu5netecal5ea4topsec3adea4toryebus5iebot3oe4belstode5cat3ronat5rouat4tagru3tale4bel_eav5our4vanceavi4ervel4ie3atrirven4erv5er_t4nerer3vestat3uraeatit4e3atifeat5ieeat3ertmo4t5east5iat3urge1as1s3ryngoau5ceraud5ereas5erryth4iaudic4ear4tee5ar2rear4liear3ereap5eream3ersac4teeam4blea3logeal3eread3liead3ersain4teac4tedy4ad_sa5lacdwell3sa3lies4al4t5tletrdvert3sa5minault5id5un4cdum4be5tledrs4an4etlant4san5ifdu5ettau5reodu5elldu5eliau5rordrunk3tiv3isaus5erdri4g3aut3ars5ativti3tradrast4d5railsau5ciaut3erdossi4sa3voudo5simdon4atdom5itt3itisdomin5doman4tit5ildo4lonscar4cdol5ittith4edol3endo4c3u4s4ces5dlestt4istrdi4val1di1v2ditor3av3ageava5latish5idithe4av5alr3tisand4iterd4itas3disiadisen34d5irodi4oladi5nossec5andin5gisecon4dimet4di5mersed4itdi3gamdig3al3di3evdi4ersd5icurse3lecselen55dicul2s4emedic4tesemi5dav5antdic5oldic5amt3iristi5quaav3end5sentmti3pliav3ernti5omosep4side4voisep3tiser4antiol3aser4to4servode3vitde3visdev3ils5estade3tesdes3tid3est_sev3enaviol4aw5er_de3sidde3sect
in3uetin4tedes4casfor5esfran5der5os3dero45dernesh4abiaw5ersder4miaw5nieay5sta3dererde5reg4deredde3raiderac4si4allsiast5tin3ets3icatdepen42s5icldeont5si5cul4tinedba5birdens5aside5lsid3enbalm5ideni4eba5lonsi4ersde1n2ade4mosde3morba5nan5tilindemo4nti4letsin5etbardi44demiedel5lisi5nolsi3nusba5romdeli4esi5o5sde3lat5de3isde4fy_bar3onde4cilsist3asist3otigi5odeb5itsit5omdeac3td3dlerd4derebas4tedaugh3dativ4dast5a3d4as2d1an4ts3kierba4th4sk5ily3baticba5tiod4a4gid5ache3ti2encys5toc3utivbat5on4cur4oti3diecur4er1c2ultb4batab4bonecul5abcu5itycub3atctro5tbcord4ti3colct5olo3smithbdeac5tic5asct5ivec4tityc4tituc3t2isbed5elc3tinict5ing4s3oid4te3loct4in_so5lansol4erso3lic3solvebe5dra5ti5bube3lit3some_bend5ac4ticsbe5nigson5atbicen5son5orc4tentbi4ers5soriosor4its5orizc2t5eec3tato5bilesct5antc5ta5gctac5u5c4ruscrost4spast45thoug3b2ill3sperms5pero4thoptcre4to5creti3spher4t5hoocre4p3sp5id_s5pierspil4lcre3atsp3ingspi5nith3oli4creancra4tecras3tbimet55crani5bin4d3spons3spoonspru5dbind3ecous5t3co3trth4is_srep5ucost3aco5rolco3rels5sam24coreds5sengs3sent5th4ioss3er_s5seriss3ers3thinkt5hillbin5etcon4iecon4eyth3eryss4in_s4siness4is_s3s2itss4ivicon4chth3ernco3mo4co5masssol3ut5herds4soreth5erc5colouco3logco3inc4c3oidco3difco3dicsta3bic4lotrs4talebin5i4s3tas_theo3lc3lingbi3re4ste5arste5atbi5rusbisul54s1teds4tedls4tedn4stereth5eas3bituas3terost5est5blastcine5a4cinabs3ti3a3sticks3ticuthal3ms4tilyst3ing5s4tir5cimenth5al_st3lercigar5ci3estch5ousstone3bla5tu5blespblim3as4tose4chotis4tray4chosostrep33strucstru5dbment4tew3arch5oid5chlorstur4echizz4ch3innch4in_ch3ily3chicoche5va3chetech4erltetr5och4eriche3olcha3pa4boledbon4iesu5ingces5trcest5oce3remcer4bites5tusu3pinsupra3sur4ascept3a5testesur3pltest3aboni4ft3ess_bon4spcent4ab3oratbor5eebor5etbor5icter5nobor5iocen5cice4metce5lomter3itt4erinsy4chrcel3aice3darcci3d4ter5ifsy5photer5idcav3ilter3iabot3an3tablica3t2rta3bolta4bout4a3cete3reota3chyta4cidc4atom3casu35t2adjta5dor5terel3cas3scashi4tage5ota5gogca3roucar5oocar5oncar3olcar3nicar3ifter5ecca3reeter3ebta5lept4a
liat4alin2tere45tallut2alo43ter3bt4eragtera4c3brachtan5atbran4db4reas5taneltan5iet5aniz4b2rescap3tica5piltent4atark5ican4trte5nog5brief5tennaca3noec2an4eta3stabring5t4ateu3tatist4ato_tat4ouca5nartat3uttau3tobri4osca5lefcal5ar4tenarcab5inb5ut5obut4ivten4ag3butiob5utinbu5tarte5cha5technbus5sibusi4ete5d2abur4rite5monb4ulosb5rist5tegicb5tletbro4mab4stacbso3lubsol3e4teledtel5izbscon4ct4ina",7:"mor4atobstupe5buf5ferb5u5nattch5ettm3orat4call5inmor5talcan5tarcan5tedcan4tictar5ia_brev5ettant5anca3ra5ctand5er_ad4din5ta3mettam5arit4eratocar5ameboun5tital4l3atal5entmonolo4cas5tigta5chom3teres4ta5blemcaulk4iccent5rcces4sacel5ib5mpel5licel5lincen5ded5ternit4sweredswell5icend5encend5ersvest5isvers5acen5tedt5esses_ama5tem5perercen5testest5ertest5intest5orcep5ticmpet5itchan5gi5cherin4choredchor5olmphal5os5toratblem5atston4iecil5lin4mologu4mologss4tern_ster4iaci5nesscla5rifclemat45static4molog_5therapmogast4ssolu4b4theredcon4aticond5erconta5dcor5dedcord5ermpol5itcost5ercraft5ispon5gicra5niuspital5spic5ulspers5a4thorescret5orspens5ac5tariabi4fid_4sor3iecter4iab5ertinberga5mc5ticiabend5erso5metesoma5toctifi4esolv5erc5tin5o_an4on_ct4ivittici5ar3ti3cint4icityc5torisc5toriz4ticulecull5ercull5inbattle5cur5ialmmel5lislang5idal5lersk5iness5kiest4tific_daun5tede5cantdefor5edel5ler_an3ti34dem4issim4plyb4aniti_ant4icde4mons_an4t5osid5eri5timet4dens5er5ti5nadden5titdeposi4zin4c3i_aph5orshil5lider5minsfact5otin5tedtint5erde5scalmis4tindes5ponse5renedevol5u4tionemdiat5omti5plexseo5logsent5eemi5racu_ar4isedic5tat4scuras4scura__ar4isi5scopic3s4cope5t4istedi5vineti5t4ando5linesca5lendom5inodot4tins5atorydress5oaus4tedtiv5allsassem4dropho4duci5ansant5risan5garaun4dresan4ded_ar5sendust5erault5erdvoc5ataul5tedearth5iea4soni4ryngoleassem4eat5enieat4iturv5ers_rus4t5urus5ticrust5eeatric5urust5at_as5sibrup5licminth5oecad5enruncul5ru4moreecent5oa5tivizecon4sc_ateli4_au3g4uec5rean_aur4e5ect5atiec4t5usrtil5le4at4is__av5erar4theneedeter5edi4alsr5terered5icala4t1i4lediges4at5icizediv5idtori4asrswear4ati5citat5ic
isedu5cerrstrat4eer4ineefact5oming5li_ba5sicef5ereemin4ersath5eteath5eromin4er__be5r4ae5ignitr5salizmind5err5salisejudic44traistmil5iestrarch4tra5ven_blaz5o4m5iliee4lates_bos5omat5enatelch5errrin5getrend5irri4fy_rran5gie4lesteel3et3o_boun4d_bra5chtri5fli_burn5ieli4ers_ca4ginrou5sel_can5tamigh5tiros5tita5talisro5stattro4pharop4ineemarc5aem5atizemat5ole4m3eraron4tonro5nateem4icisnaffil4romant4emig5rarol5iteass5iblassa5giemon5ola4sonedem5orise4moticempara54empli_en3am3o_cen5sot5tereren4cileen4d5alen4dedlttitud45n4a3grend5ritrn5atine5nellee5nereor4mite_r4ming_en3ig3rmet5icirma5tocr4m3atinannot4en4tersen4tifyarp5ersent5rinr5kiesteol5ar_eologi4aro4mas_clem5eriv5eliri5vallris5ternan5teda5rishi3mesti4epolit5tup5lettup5lic_cop5roepres5erink5erme5si4aring5ie_co5terrim5an4equi5noment5or4tut4ivna5turiera4cierig5ant5rifugaar4donear5dinarif5tiear5chetrift5er4erati_4eratimrick4enrich5omrica5tuaran5teer5esteer5estieres5trre5termar4aged_dea5coaract4irest5erre5stalapu5lareri4ciduant5isuant5itres5ist5er5ickapo5strer4imet_de5lecuar4t5iua5terneri5staren4ter5ernaclmend5errem5atoreman4d_del5egerre5laer5sinere5galiert5er_ert5ersrec4t3rr4e1c2rreci5simelt5er_deli5ran4tone_de5nitan4tinges5idenesi5diur4d1an4rcriti4es3ol3urci5nogant5abludi4cinrch4ieru5dinisrch5ateu5ditiorch5ardes3per3mel5lerrcen5eres5piraanis5teesplen5uen4teres4s3anest5ifi_de5resues5trin4cept_rav5elianel5li4r4atom5ra5tolan4donirat4in_r4as5teand5istrass5in5meg2a1et3al5oand5eerrar5ia_an3d4atrant5inuicent55rantelran5teduild5erran4gennch5oloetell5irad4inencid5enra5culorac5ulaet3er3aet5eria3ra3binet5itivui5val5amphi5gam5peri_de5sirqua5tio4e4trala4mium_et5ressetrib5aaminos4am5inizamini4fp5u5tis5ulchrepush4ieev5eratev5eren4ulenciever4erpu5lar_puff5erevictu4evis5in_de5sisfall5inncip5ie_di4al_fend5erpros5trpropyl5proph5eul4l5ibp3roc3apris5inpring5imbival5nco5pat5pressiyllab5iulp5ingpre5matylin5dem4b3ingnct4ivife5veriffec4te_du4al_pprob5am5bererum4bar__echin5fi5anceal5tatipparat5pout5ern4curviumi5liaumin4aru4minedu4m3ingpoult5epor5tieal4
orim4poratopon4i4eflo5rical4lish_ed4it_foment4_ed4itialli5anplum4befor4m3a_el3ev3fratch4pla5t4oma5turem4atizafrost5ipis5tilmat4itifuel5ligal5lerpill5ingang5ergariz4aunho5lial5ipotgass5inph5oriz4phonedgest5atg5gererphant5ipha5gedgiv5en_5glass_unk5eripet5allal5endepes5tilpert5isper5tinper4os_al5ance5p4er3nperem5indeleg4gna5turndepre4aint5eruodent4pend5er4gogram_en4dedpearl5indes5crgth5enimas4tinpat4richad4inepas4tinnd5is4ihak4inehal5anthan4crohar5dieha5rismhar4tedaet4or_aerody5pag4atihaught5_er5em5hearch44urantiheav5enurb5ingoxic5olowhith4ur5den_ur5deniowel5lih5erettovid5ennd5ism_her5ialh5erineout5ishoun5ginound5elhet4tedact5oryu5ri5cuheumat5ur5ifieact5ileought5ihi3c4anuri4os_h4i4ersh4manicurl5ingact5atemast4ichnocen5_men5taaci4erso5thermmar4shimantel5ot5estaurpen5tach5isma5chinihol4is_ot4atioot4anico5talito5stome5acanthost5icaosten5tost5ageh4op4te3house3hras5eoy4chosen5ectom4abolicht5eneror5tes_man4icay5chedei5a4g5oori5cidialect4or5este_escal5iatur4aorator5_wine5s_vo5lutich5ingo5quial_etern5us5ticiic4tedloplast4ophy5laid4ines4operag2i4d1itoost5eriff5leronvo5lui4ficaconti5fiman5dar_vic5to_fal4lemament4mal4is__ver4ieila5telonical4i5later_feoff5ili4arl_va5ledil4ificond5ent_ur5eth5ond5arut4toneil5ine_on5ativonast5i_under5ompt5eromot5ivi4matedi4matin_fi5liaimpar5a_fil5tro5lunte4inalit_tular5olon5el5neringinator5_tro4ph_fis4c5inc4tua_trin4aol4lopeoli4f3eol5ies_mal5ari_tran4c_tit4isnerv5inval4iseol5icizinfilt5olat5erin4itud_gam5etxter4m3ink4inein4sch5_tell5evas5el5insect5insec5uinsolv5int5essvat4inaoher4erint5res_tamar5xtens5o_tact4iinvol5ui4omani_gen4et_gen5iave5linei5pheriip5torivel5lerir4alinvel5opiir4alliirassi4nfortu5irl5ingirwo4meo4ducts4lut5arv5en5ue_stat4o_si5gnoverde5v4v4ere4o4duct_odu5cerodis5iaocus5siis5onerist5encxotrop4_ser4ie5vialitist5entochro4n_gnost4_sec5tovi5cariocess4iis4t3iclum4brio5calli4is4tom4itioneit5ress3vili4av5ilisev5ilizevil5linoast5eritu4als_han4de_hast5ii4vers__sa5linlsi4fiai5vilit5ivist_5ivistsnvoc5at_ho5rol_rol4lakinema4ni4cul4nultim5_re5strl
oth4ie5la5collos5sienight5ilor4ife_re5spolor5iatntup5li5lo5pen_re5sen_res5ci_re5linnt5ressn4trant_re5garloom5erxhort4a_ran5gilong5invol4ubi_ra5cem_put4ten5tition4tiparlo4cus__pos5si_lash4e_len5tint5ing_nit5res_le5vanxecut5o_plica4n4tify__plast45latini_phon4illow5er_li4onslligat4_peri5nntic4u4_pen5dewall5ern5ticizwan5gliwank5erwar5dedward5ern5ticisnth5ine_lo4giawar5thinmater4_pec3t4_pa4tiowav4ine_lous5i_para5t_par5af_lov5ernmor5ti_orner4nt5ativ_or5che_ma5lin_mar5ti_or4at4le5ation5tasiswel4izint4ariun4t3antntan5eon4t3ancleav5erl3eb5rannel5li_nucle5_no5ticlem5enclen5darwill5in_ni5tronsec4tewing5er4lentio5l4eriannerv5a_nas5tinres5tr5le5tu5lev5itano5blemnovel5el3ic3onwol5ver_mor5tilift5erlight5ilimet4e_mo5lec5lin3ealin4er_lin4erslin4gern5ocula_min5uenobser4_met4er_me5rin_me5ridmas4ted",8:"_musi5cobserv5anwith5erilect5icaweight5ica5laman_mal5ad5l5di5nestast5i4cntend5enntern5alnter5nat_perse5c_pe5titi_phe5nomxe5cutio5latiliz_librar5nt5ilati_les5son_po5lite_ac5tiva5latilisnis5tersnis5ter_tamorph5_pro5batvo5litiolan5tine_ref5eremophil5ila5melli_re5statca3r4i3c5lamandrcen5ter_5visecti5numentanvers5aniver5saliv5eling_salt5ercen5ters_ha5bilio4c5ativlunch5eois5terer_sev5era_glor5io_stra5tocham5perstor5ianstil5ler_ge5neti_sulph5a_tac5ticnform5eroin4t5erneuma5to_te5ra5tma5chinecine5mat_tri5bal_fran5ch_tri5sti_fi5n4it_troph5o_fin5essimparad5stant5iv_vent5il4o5nomicssor5ialight5ersight5er__evol5utm5ament_ont5ane_icotyle5orest5atiab5oliziab5olismod5ifiehrill5inothalam5oth5erinnduct5ivrth5ing_otherm5a5ot5inizov5elinghav5ersipass5ivessent5ermater5n4ain5dersuo5tatiopens5atipercent5slav5eriplant5er5sing5erfortu5naplumb5erpo5lemicpound5erffranch5ppress5oa5lumnia_domest5pref5ereprel5atea5marinepre5scina5m4aticpring5ertil4l5agmmand5er5sid5u4a_de5spoievol5utee5tometeetend5erting5ingmed5icatran5dishm5ed5ieset5allis_de5servsh5inessmlo5cutiuest5ratncent5rincarn5atdes5ignareact5ivr5ebratereced5ennbarric5sen5sorier5nalisuar5tersre4t4er3_custom5naugh5tirill5er_sen5sati5scripti_cotyle5e4p5rob5a5ri5
netaun5chierin4t5errip5lica_art5icl5at5ressepend5entu4al5lir5ma5tolttitu5di_cent5ria5torianena5ture5na5geri_cas5ualromolec5elom5ateatitud5i_ca5pituround5ernac5tiva_at5omizrpass5intomat5oltrifu5gae4l3ica4rpret5erel5ativetrav5esttra5versat5ernisat5ernizefor5estath5erinef5initeto5talizto5talis_barri5c_authen5mass5ing",9:"_bap5tismna5cious_econstit5na5ciousl_at5omisena5culari_cen5tena_clima5toepe5titionar5tisti_cri5ticirill5ingserpent5inrcen5tenaest5igati_de5scrib_de5signe_determ5ifals5ifiefan5tasizplas5ticiundeter5msmu5tatiopa5triciaosclero5s_fec5unda_ulti5matindeterm5ipart5ite_string5i5lutionizltramont5_re5storeter5iorit_invest5imonolog5introl5ler_lam5enta_po5sitio_para5dis_ora5tori_me5lodio"}};Hyphenator.config({useCSS3hyphenation:true});Hyphenator.run(); | PypiClean |
/BRAILS-3.0.1.tar.gz/BRAILS-3.0.1/brails/modules/FoundationClassifier/npid/npid_toolbox/calculate_all_distances.py | from MDistance import Distance_Eval
from PIL import Image
import os
import pandas as pd
import numpy as np
import cv2
def calc_to_csv(root_path, csv_filename, evaluator):
    """Score every JPEG below *root_path* with *evaluator* and write a CSV.

    Each CSV row holds the full file path, the distance score and the
    (squeezed) feature vector returned by the evaluator.
    """
    records = []
    for dirpath, _dirs, filenames in os.walk(root_path, followlinks=True):
        print("Traversing {}".format(dirpath))
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            print(name)
            if not full_path.endswith('jpg'):
                continue
            with open(full_path, 'rb') as handle:
                image = Image.open(handle)
                # Evaluate while the file handle is still open (PIL loads lazily)
                distance, features = evaluator.get_distance_and_features(image)
            records.append({
                'filename': full_path,
                'distance': distance,
                'features': features.squeeze(),
            })
    pd.DataFrame(records).to_csv(csv_filename, sep=',', index=False)
if __name__ == '__main__':
    # Checkpoint
    # Load the trained model whose embedding space defines the distances.
    evaluator = Distance_Eval('/home/saschaho/Simcenter/lemniscate.pytorch/checkpoint.pth.tar')
    #evaluator = Distance_Eval('/home/saschaho/Simcenter/lemniscate.pytorch/lemniscate_resnet50.pth.tar')
    # NOTE(review): paths below are hard-coded to one machine — adjust before reuse.
    csv_filename = '/home/saschaho/Simcenter/lemniscate.pytorch/distances.csv'
    root_path = '/home/saschaho/Simcenter/copy_for_zhirong/Florida/'
    # Set to False to reuse a previously computed distances CSV.
    recalculate_distances = True
    if recalculate_distances: calc_to_csv(root_path,csv_filename,evaluator)
    # These paths are currently unused in this script.
    pkl_save_path = '/home/saschaho/Simcenter/copy_for_zhirong/pkl_stat_data'
    pkl_stats_file = '/home/saschaho/Simcenter/copy_for_zhirong/stats.pkl'
    ### Calculate and show distance images
    distances = pd.read_csv(csv_filename)
    # Grab the extremes of the distance distribution for visual inspection.
    closest_ten = distances.sort_values(by='distance').iloc[0:50]
    furthest_ten = distances.sort_values(by='distance').iloc[-70:]
    for i, image in enumerate(furthest_ten.iterrows()):
        image = image[1] # Some index number is at position 0
        img_path = image['filename']
        img_name = os.path.basename(img_path).split('.')[0]
        print ("Processing {}".format(img_path))
        img_default_dist = image['distance']
        with open(img_path, 'rb') as f:
            img = Image.open(f)
            # Force pixel load while the file is open; PIL reads lazily.
            img_array = np.array(img)
        # OpenCV expects BGR channel order for display.
        final_img = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
        #dist_at_pos_pd = evaluator.image_check(img_array, mode='peephole')
        #final_img = annotate_img(img_array=img_array, dist_at_pos_pd=dist_at_pos_pd, default_dist=img_default_dist)
        #cv2.imwrite('furthest_{}.png'.format(i+50),final_img)
        # Show each image for one second.
        cv2.imshow('test',final_img)
        cv2.waitKey(1000)
        print (i)
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/utils/views.py | """Helper methods for views."""
import os
from time import mktime
from typing import Optional
from zipfile import ZipFile
from django.conf import settings
from django.core.paginator import EmptyPage, Paginator
from django.http import FileResponse, Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.http import http_date
from django.utils.translation import activate
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy, pgettext_lazy
from django.views.decorators.gzip import gzip_page
from django.views.generic.edit import FormView
from weblate.formats.models import EXPORTERS, FILE_FORMATS
from weblate.trans.models import Component, Project, Translation
from weblate.utils import messages
from weblate.utils.errors import report_error
from weblate.vcs.git import LocalRepository
# Mapping of sort field names (as used in the ``sort_by`` query parameter)
# to key functions consumed by sort_objects(). Sorted objects either carry
# a ``name`` directly or expose it via ``.component``; all expose ``.stats``.
SORT_KEYS = {
    "name": lambda x: x.name if hasattr(x, "name") else x.component.name,
    "translated": lambda x: x.stats.translated_percent,
    "untranslated": lambda x: x.stats.todo,
    "untranslated_words": lambda x: x.stats.todo_words,
    "untranslated_chars": lambda x: x.stats.todo_chars,
    "nottranslated": lambda x: x.stats.nottranslated,
    "checks": lambda x: x.stats.allchecks,
    "suggestions": lambda x: x.stats.suggestions,
    "comments": lambda x: x.stats.comments,
}
def optional_form(form, perm_user, perm, perm_obj, **kwargs):
    """Instantiate *form* with **kwargs, but only when *perm_user* holds
    the *perm* permission on *perm_obj*; otherwise return None."""
    if perm_user.has_perm(perm, perm_obj):
        return form(**kwargs)
    return None
def get_percent_color(percent):
    """Map a completion percentage to a dashboard color.

    Green above 85 %, blue above 50 %, red otherwise.
    """
    for threshold, color in ((85, "#2eccaa"), (50, "#38f")):
        if percent >= threshold:
            return color
    return "#f6664c"
def get_page_limit(request, default):
    """Return (page, limit) parsed from the request query string.

    Non-numeric values fall back to their defaults; the limit is clamped
    to the 10–2000 range and the page is at least 1.
    """

    def parse_int(raw, fallback):
        # Query-string values are strings; treat anything unparseable as absent.
        try:
            return int(raw)
        except ValueError:
            return fallback

    limit = parse_int(request.GET.get("limit", default), default)
    limit = min(max(10, limit), 2000)
    page = parse_int(request.GET.get("page", 1), 1)
    return max(1, page), limit
def sort_objects(object_list, sort_by: str):
    """Sort a list of stats-bearing objects by one of the SORT_KEYS fields.

    Returns ``(sorted_list, applied_sort)``; when the requested field is
    unknown the original list is returned untouched with ``None``.
    A leading ``-`` in *sort_by* requests descending order.
    """
    reverse = sort_by.startswith("-")
    field = sort_by[1:] if reverse else sort_by
    key = SORT_KEYS.get(field)
    if key is None:
        return object_list, None
    return sorted(object_list, key=key, reverse=reverse), sort_by
def get_paginator(request, object_list, page_limit=None):
    """Return the current page of a (optionally sorted) object list.

    Page number, limit and sort field are read from the request query
    string. Out-of-range page numbers fall back to the last page. The
    applied sort (or None) is exposed as ``paginator.sort_by`` for the
    templates.
    """
    page, limit = get_page_limit(request, page_limit or settings.DEFAULT_PAGE_LIMIT)
    sort_by = request.GET.get("sort_by")
    if sort_by:
        # sort_objects() returns None when the field is unknown
        object_list, sort_by = sort_objects(object_list, sort_by)
    paginator = Paginator(object_list, limit)
    paginator.sort_by = sort_by
    try:
        return paginator.page(page)
    except EmptyPage:
        # Requested page past the end: serve the last available page.
        return paginator.page(paginator.num_pages)
class ComponentViewMixin:
    """Mixin for class-based views that operate on a single component.

    Expects ``project`` and ``component`` slugs among the URL kwargs.
    """

    # This should be done in setup once we drop support for older Django
    def get_component(self):
        # Resolves slugs to a Component; get_component() also enforces ACL
        # and raises Http404 when the component does not exist.
        return get_component(
            self.request, self.kwargs["project"], self.kwargs["component"]
        )
class ProjectViewMixin:
    """Mixin resolving the ``project`` URL kwarg into ``self.project``."""

    # Populated in dispatch() before the actual view code runs.
    project = None

    # This should be done in setup once we drop support for older Django
    def dispatch(self, request, *args, **kwargs):
        # get_project() raises Http404 for unknown slugs and checks access.
        self.project = get_project(self.request, self.kwargs["project"])
        return super().dispatch(request, *args, **kwargs)
# Unit sort orderings offered in the UI. Keys are ORDER BY expressions
# (a leading "-" means descending); values are translatable labels.
SORT_CHOICES = {
    "-priority,position": gettext_lazy("Position and priority"),
    "position": gettext_lazy("Position"),
    "priority": gettext_lazy("Priority"),
    "labels": gettext_lazy("Labels"),
    "source": gettext_lazy("Source string"),
    "target": gettext_lazy("Target string"),
    "timestamp": gettext_lazy("String age"),
    "num_words": gettext_lazy("Number of words"),
    "num_comments": gettext_lazy("Number of comments"),
    "num_failing_checks": gettext_lazy("Number of failing checks"),
    "context": pgettext_lazy("Translation key", "Key"),
}

# Same labels keyed without the descending prefix, so get_sort_name() can
# map a requested sort query back to its human readable name.
SORT_LOOKUP = {key.replace("-", ""): value for key, value in SORT_CHOICES.items()}
def get_sort_name(request, obj=None):
    """Return the requested sort query and its human readable name.

    Glossary components default to sorting by source string; everything
    else defaults to position and priority.
    """
    is_glossary = hasattr(obj, "component") and obj.component.is_glossary
    default = "source" if is_glossary else "-priority,position"
    sort_query = request.GET.get("sort_by", default)
    # Look the label up without the descending "-" prefix.
    sort_name = SORT_LOOKUP.get(
        sort_query.replace("-", ""), _("Position and priority")
    )
    return {
        "query": sort_query,
        "name": sort_name,
    }
def get_translation(request, project, component, lang, skip_acl=False):
    """Return translation matching the given slugs and language code.

    :param skip_acl: bypass the per-user access check (internal callers)
    :raises Http404: when no matching translation exists
    """
    translation = get_object_or_404(
        Translation.objects.prefetch(),
        language__code=lang,
        component__slug=component,
        component__project__slug=project,
    )
    if not skip_acl:
        # Enforce per-user access to the owning component.
        request.user.check_access_component(translation.component)
    return translation
def get_component(request, project, component, skip_acl=False):
    """Return component matching the given slugs.

    :param skip_acl: bypass the per-user access check (internal callers)
    :raises Http404: when no matching component exists
    """
    component = get_object_or_404(
        Component.objects.prefetch(),
        project__slug=project,
        slug=component,
    )
    if not skip_acl:
        request.user.check_access_component(component)
    # Record who is operating on the object for subsequent changes.
    component.acting_user = request.user
    return component
def get_project(request, project, skip_acl=False):
    """Return project matching the given slug.

    :param skip_acl: bypass the per-user access check (internal callers)
    :raises Http404: when no matching project exists
    """
    project = get_object_or_404(Project, slug=project)
    if not skip_acl:
        request.user.check_access(project)
    # Record who is operating on the object for subsequent changes.
    project.acting_user = request.user
    return project
def get_project_translation(request, project=None, component=None, lang=None):
    """Resolve slugs into a (project, component, translation) tuple.

    The most specific combination of the given parameters wins: with a
    language and component the full translation is resolved; with only a
    component (or only a project) the remaining entries stay None.
    """
    translation = None
    if component and lang:
        # Full path known: the translation carries component and project.
        translation = get_translation(request, project, component, lang)
        component = translation.component
        project = component.project
    elif component:
        component = get_component(request, project, component)
        project = component.project
    elif project:
        project = get_project(request, project)
    # Normalize empty strings to None for the callers.
    return project or None, component or None, translation or None
def guess_filemask_from_doc(data):
    """Fill in ``data["filemask"]`` for a document upload when missing.

    The extension is taken from the uploaded file name, falling back to
    the selected file format; the mask lives under the slug directory
    (or "translations" when no slug is given).
    """
    if "filemask" in data:
        return
    extension = ""
    if "docfile" in data and hasattr(data["docfile"], "name"):
        extension = os.path.splitext(os.path.basename(data["docfile"].name))[1]
    if not extension and data.get("file_format") in FILE_FORMATS:
        extension = FILE_FORMATS[data["file_format"]].extension()
    slug = data.get("slug", "translations")
    data["filemask"] = f"{slug}/*{extension}"
def create_component_from_doc(data):
    """Create a local-VCS backed component from an uploaded document.

    The uploaded file becomes the translation template, named after the
    source language code (or the server default language). Returns a
    transient Component instance used only to locate the repository on
    disk; the database object is created later by the caller.
    """
    # Calculate filename
    uploaded = data["docfile"]
    guess_filemask_from_doc(data)
    filemask = data["filemask"]
    filename = filemask.replace(
        "*",
        data["source_language"].code
        if "source_language" in data
        else settings.DEFAULT_LANGUAGE,
    )
    # Create fake component (needed to calculate path)
    fake = Component(
        project=data["project"],
        slug=data["slug"],
        name=data["name"],
        template=filename,
        filemask=filemask,
    )
    # Create repository holding the uploaded document.
    LocalRepository.from_files(fake.full_path, {filename: uploaded.read()})
    return fake
def create_component_from_zip(data):
    """Create a local-VCS backed component from an uploaded ZIP archive.

    Returns a transient Component instance used only to locate the
    repository on disk; the database object is created later by the
    caller.
    """
    # Create fake component (needed to calculate path)
    fake = Component(
        project=data["project"],
        slug=data["slug"],
        name=data["name"],
    )
    # Create repository from the archive contents.
    LocalRepository.from_zip(fake.full_path, data["zipfile"])
    return fake
def try_set_language(lang):
    """Try to activate language, falling back to English on failure."""
    try:
        activate(lang)
    except Exception:
        # Ignore failure on activating language; an unknown or broken
        # language code must not break the response.
        activate("en")
def import_message(request, count, message_none, message_ok):
    """Flash the outcome of an import operation.

    Shows a success message carrying *count* when anything was imported,
    otherwise a warning with *message_none*.
    """
    if count == 0:
        messages.warning(request, message_none)
        return
    messages.success(request, message_ok % count)
def iter_files(filenames):
    """Iterate over all files for the given file or directory names.

    Plain file names are yielded as-is; directories are walked
    recursively, skipping Git and Mercurial metadata directories
    entirely.
    """
    for filename in filenames:
        if os.path.isdir(filename):
            for root, dirs, files in os.walk(filename):
                # Prune VCS internals in place so os.walk neither yields
                # nor descends into them. The previous substring test
                # ('"/.git/" in root') missed files directly inside the
                # .git / .hg directory itself (no trailing separator).
                dirs[:] = [name for name in dirs if name not in (".git", ".hg")]
                yield from (os.path.join(root, name) for name in files)
        else:
            yield filename
def zip_download(root, filenames, name="translations"):
    """Return an HTTP response serving the given files as a ZIP archive.

    Archive member names are relative to *root*; files vanishing between
    listing and reading are silently skipped.
    """
    response = HttpResponse(content_type="application/zip")
    with ZipFile(response, "w") as archive:
        for path in iter_files(filenames):
            try:
                with open(path, "rb") as handle:
                    content = handle.read()
            except FileNotFoundError:
                continue
            archive.writestr(os.path.relpath(path, root), content)
    response["Content-Disposition"] = f'attachment; filename="{name}.zip"'
    return response
@gzip_page
def download_translation_file(
    request,
    translation: Translation,
    fmt: Optional[str] = None,
    query_string: Optional[str] = None,
):
    """Serve a translation as a downloadable file.

    With ``fmt`` set, units are converted through the matching exporter
    (optionally filtered by ``query_string``). Otherwise the underlying
    translation file is served directly — as a single file, or a ZIP
    archive when the translation spans multiple files.

    :raises Http404: for unsupported formats or missing files
    """
    if fmt is not None:
        try:
            exporter_cls = EXPORTERS[fmt]
        except KeyError:
            raise Http404("File format not supported")
        if not exporter_cls.supports(translation):
            raise Http404("File format not supported")
        exporter = exporter_cls(translation=translation)
        units = translation.unit_set.prefetch_full().order_by("position")
        if query_string:
            units = units.search(query_string)
        exporter.add_units(units)
        response = exporter.get_response(
            "{{project}}-{0}-{{language}}.{{extension}}".format(
                translation.component.slug
            )
        )
    else:
        # Force flushing pending units so the served file is up to date.
        try:
            translation.commit_pending("download", None)
        except Exception:
            report_error(cause="Download commit")
        filenames = translation.filenames
        if len(filenames) == 1:
            extension = (
                os.path.splitext(translation.filename)[1]
                or f".{translation.component.file_format_cls.extension()}"
            )
            if not os.path.exists(filenames[0]):
                raise Http404("File not found")
            # Create response streaming the file from disk.
            response = FileResponse(
                open(filenames[0], "rb"),
                content_type=translation.component.file_format_cls.mimetype(),
            )
        else:
            extension = ".zip"
            response = zip_download(
                translation.get_filename(),
                filenames,
                translation.full_slug.replace("/", "-"),
            )
    # Construct filename (do not use real filename as it is usually not
    # that useful)
    project_slug = translation.component.project.slug
    component_slug = translation.component.slug
    language_code = translation.language.code
    filename = f"{project_slug}-{component_slug}-{language_code}{extension}"
    # Fill in response headers. Bug fix: the computed filename was not
    # interpolated before, producing a literal, useless header value.
    response["Content-Disposition"] = f"attachment; filename={filename}"
    if translation.stats.last_changed:
        response["Last-Modified"] = http_date(
            mktime(translation.stats.last_changed.timetuple())
        )
    return response
def get_form_errors(form):
    """Yield every error on *form* as a ready-to-display string.

    Form-wide errors come first, verbatim; field errors follow, each
    prefixed with the name of the offending parameter.
    """
    for message in form.non_field_errors():
        yield message
    for field in form:
        for message in field.errors:
            yield _("Error in parameter %(field)s: %(error)s") % {
                "field": field.name,
                "error": message,
            }
def show_form_errors(request, form):
    """Report every validation error on *form* via the messages framework."""
    for message in get_form_errors(form):
        messages.error(request, message)
class ErrorFormView(FormView):
    """FormView that never renders anything itself.

    Both GET requests and invalid submissions redirect to the success
    URL; validation errors are surfaced through the messages framework.
    """

    def form_invalid(self, form):
        """Report the errors as messages, then redirect to the success URL."""
        show_form_errors(self.request, form)
        return HttpResponseRedirect(self.get_success_url())

    def get(self, request, *args, **kwargs):
        """GET is not supported; redirect instead of rendering a form."""
        return HttpResponseRedirect(self.get_success_url())
/MeUtils-2023.8.29.13.9.44-py3-none-any.whl/meutils/fileparser/filetype/types/video.py |
from __future__ import absolute_import
from .base import Type
from .isobmff import IsoBmff
class Mp4(IsoBmff):
    """MP4 video matcher: ISO-BMFF container with an MP4 ftyp brand."""
    MIME = 'video/mp4'
    EXTENSION = 'mp4'

    def __init__(self):
        super(Mp4, self).__init__(
            mime=Mp4.MIME,
            extension=Mp4.EXTENSION
        )

    def match(self, buf):
        if not self._is_isobmff(buf):
            return False
        major_brand, minor_version, compatible_brands = self._get_ftyp(buf)
        mp4_brands = ('mp41', 'mp42', 'isom')
        # Either a compatible brand or the major brand must be an MP4 brand.
        if any(brand in mp4_brands for brand in compatible_brands):
            return True
        return major_brand in mp4_brands
class M4v(Type):
    """M4V video matcher: fixed 11-byte 'ftypM4V' signature at offset 0."""
    MIME = 'video/x-m4v'
    EXTENSION = 'm4v'

    def __init__(self):
        super(M4v, self).__init__(
            mime=M4v.MIME,
            extension=M4v.EXTENSION
        )

    def match(self, buf):
        signature = (0x0, 0x0, 0x0, 0x1C, 0x66, 0x74,
                     0x79, 0x70, 0x4D, 0x34, 0x56)
        return (len(buf) > 10 and
                all(buf[i] == byte for i, byte in enumerate(signature)))
class Mkv(Type):
    """Matroska video matcher: EBML magic plus a 'matroska' DocType."""
    MIME = 'video/x-matroska'
    EXTENSION = 'mkv'

    def __init__(self):
        super(Mkv, self).__init__(
            mime=Mkv.MIME,
            extension=Mkv.EXTENSION
        )

    def match(self, buf):
        has_ebml_header = buf.startswith(b'\x1A\x45\xDF\xA3')
        has_matroska_doctype = buf.find(b'\x42\x82\x88matroska') > -1
        return has_ebml_header and has_matroska_doctype
class Webm(Type):
    """WebM video matcher: EBML magic plus a 'webm' DocType."""
    MIME = 'video/webm'
    EXTENSION = 'webm'

    def __init__(self):
        super(Webm, self).__init__(
            mime=Webm.MIME,
            extension=Webm.EXTENSION
        )

    def match(self, buf):
        has_ebml_header = buf.startswith(b'\x1A\x45\xDF\xA3')
        has_webm_doctype = buf.find(b'\x42\x82\x84webm') > -1
        return has_ebml_header and has_webm_doctype
class Mov(IsoBmff):
    """QuickTime MOV matcher: ISO-BMFF container with the 'qt  ' brand."""
    MIME = 'video/quicktime'
    EXTENSION = 'mov'

    def __init__(self):
        super(Mov, self).__init__(
            mime=Mov.MIME,
            extension=Mov.EXTENSION
        )

    def match(self, buf):
        if not self._is_isobmff(buf):
            return False
        major_brand, minor_version, compatible_brands = self._get_ftyp(buf)
        return major_brand == 'qt  '
class Avi(Type):
    """AVI video matcher: 'RIFF' header with 'AVI ' at offset 8."""
    MIME = 'video/x-msvideo'
    EXTENSION = 'avi'

    def __init__(self):
        super(Avi, self).__init__(
            mime=Avi.MIME,
            extension=Avi.EXTENSION
        )

    def match(self, buf):
        # (offset, expected byte) pairs: 'RIFF' then 'AVI ' at offset 8.
        signature = ((0, 0x52), (1, 0x49), (2, 0x46), (3, 0x46),
                     (8, 0x41), (9, 0x56), (10, 0x49), (11, 0x20))
        return (len(buf) > 11 and
                all(buf[i] == byte for i, byte in signature))
class Wmv(Type):
    """WMV video matcher: fixed 10-byte signature at offset 0."""
    MIME = 'video/x-ms-wmv'
    EXTENSION = 'wmv'

    def __init__(self):
        super(Wmv, self).__init__(
            mime=Wmv.MIME,
            extension=Wmv.EXTENSION
        )

    def match(self, buf):
        signature = (0x30, 0x26, 0xB2, 0x75, 0x8E,
                     0x66, 0xCF, 0x11, 0xA6, 0xD9)
        return (len(buf) > 9 and
                all(buf[i] == byte for i, byte in enumerate(signature)))
class Flv(Type):
    """FLV video matcher: 'F', 'L', 'V' followed by version byte 0x01."""
    MIME = 'video/x-flv'
    EXTENSION = 'flv'

    def __init__(self):
        super(Flv, self).__init__(
            mime=Flv.MIME,
            extension=Flv.EXTENSION
        )

    def match(self, buf):
        signature = (0x46, 0x4C, 0x56, 0x01)
        return (len(buf) > 3 and
                all(buf[i] == byte for i, byte in enumerate(signature)))
class Mpeg(Type):
    """MPEG video matcher: 00 00 01 prefix, fourth byte in 0xB0-0xBF."""
    MIME = 'video/mpeg'
    EXTENSION = 'mpg'

    def __init__(self):
        super(Mpeg, self).__init__(
            mime=Mpeg.MIME,
            extension=Mpeg.EXTENSION
        )

    def match(self, buf):
        return (len(buf) > 3 and
                buf[0] == 0x0 and
                buf[1] == 0x0 and
                buf[2] == 0x1 and
                0xb0 <= buf[3] <= 0xbf)
class M3gp(Type):
    """Implements the 3GP video type matcher."""
    MIME = 'video/3gpp'
    EXTENSION = '3gp'

    def __init__(self):
        super(M3gp, self).__init__(
            mime=M3gp.MIME,
            extension=M3gp.EXTENSION
        )

    def match(self, buf):
        # NOTE(review): compares 'ftyp3gp' at offset 0, not after the usual
        # 4-byte box-size field -- kept as-is; confirm against upstream.
        expected = bytearray(b'ftyp3gp')
        return buf[:7] == expected
/frmt-2.0.0.tar.gz/frmt-2.0.0/README.rst | frmt
====
.. image:: https://travis-ci.com/sigvaldm/frmt.svg?branch=master
:target: https://travis-ci.com/sigvaldm/frmt
.. image:: https://coveralls.io/repos/github/sigvaldm/frmt/badge.svg?branch=master
:target: https://coveralls.io/github/sigvaldm/frmt?branch=master
.. image:: https://img.shields.io/pypi/pyversions/frmt.svg
:target: https://pypi.org/project/Frmt
frmt is a pretty-printing library for tables and times. The core philosophy is that it should work with minimal hassle, yet offer flexibility through elegant, lightweight design (less than 150 lines of code excluding comments).
The library consists of the following functions:
======================= ====================== ==========================================================================================
``format_table()`` ``print_table()`` Format a list of lists as a table.
``format_time_large()`` ``print_time_large()`` Format seconds to ``dd:hh:mm:ss``.
``format_time_small()`` ``print_time_small()`` Format seconds using SI-prefix and 3 significant figures, e.g. ``3.45ms``.
``format_time()`` ``print_time()`` Same as ``*_time_small()`` for less than 60 seconds, same as ``*_time_large()`` otherwise.
``format_fit()`` ``print_fit()`` Fit and align text within a width.
======================= ====================== ==========================================================================================
The ``format_*()`` functions returns a formatted string, whereas the ``print_*()`` functions are simple wrappers around the ``format_*()`` functions that prints the return string.
Installation
------------
Install from PyPI using ``pip`` (preferred method)::
pip install frmt
Or download the GitHub repository https://github.com/sigvaldm/frmt.git and run::
python setup.py install
``*_table()``
-------------
Signature::
*_table(table,
align='<',
format='{:.3g}',
colwidth=None,
maxwidth=None,
spacing=2,
truncate=0,
suffix="..."
)
The ``*_table()`` functions formats a table represented as a list of lists. Consider this example using a table of grades from 1.0 (best) to 6.0 (worst)::
>>> from frmt import print_table
>>> grades = [['' , 'Math', 'English', 'History', 'Comment' ],
... ['Bob' , 1.2 , 2.1 , 5.9 , 'Failed at history'],
... ['Jane' , 2.4 , 1.1 , 1.4 , 'Quite good' ],
... ['Trevor', 2.2 , 4.4 , 3.2 , 'Somewhat average' ]]
>>> print_table(grades)
Math English History Comment
Bob 1.2 2.1 5.9 Failed at history
Jane 2.4 1.1 1.4 Quite good
Trevor 2.2 4.4 3.2 Somewhat average
The functions also work with other kinds of iterables of iterables, for instance NumPy arrays. It also supports custom alignment and formatting for each individual cell.
Alignment
~~~~~~~~~
The ``align`` parameter can be used to change cell alignment:
* ``'<'`` - Left aligned (default)
* ``'^'`` - Centered
* ``'>'`` - Right aligned
It is also possible to have different alignments for different columns by having one character for each column. For instance, to have the first column left aligned and the subsequent four right aligned, set ``align`` to ``'<>>>>'`` or, equivalently, to ``'<>'``::
>>> print_table(grades, '<>')
Math English History Comment
Bob 1.2 2.1 5.9 Failed at history
Jane 2.4 1.1 1.4 Quite good
Trevor 2.2 4.4 3.2 Somewhat average
Note that if only some columns are specified, the last specified alignment is repeated. This is useful typically when the left column is text and the remaining columns are numbers (although here it would be better to left align the rightmost column). This pattern of "repeating the last" is a core philosophy used throughout frmt to achieve flexibility.
It is also possible to have different alignments for different *rows* by having a list of alignment strings for each row. Again, if not all rows are specified, the last alignment string in the list is repeated for subsequent rows. For instance::
>>> print_table(grades, ['^','<^^^<'])
Math English History Comment
Bob 1.2 2.1 5.9 Failed at history
Jane 2.4 1.1 1.4 Quite good
Trevor 2.2 4.4 3.2 Somewhat average
On the header row all cells are centered (``'^'``). On the subsequent rows the leftmost column is left aligned, the three next ones are centered, and the last is also left aligned (``'<^^^<'``).
Cell formatting
~~~~~~~~~~~~~~~
The ``format`` parameter can be used to format the cell contents. By default the `format string`_ ``'{:.3g}'`` is used to format numbers. This is a reasonable default, but often one would like to tune the formatting. For instance if we do not wish to display decimals in the above grading example, it can be easily achieved::
>>> print_table(grades, format='{:.0f}')
Math English History Comment
Bob 1 2 6 Failed at history
Jane 2 1 1 Quite good
Trevor 2 4 3 Somewhat average
``format`` also accepts a function as an input to allow for greater flexibility. As an example, consider formatting the grades as letters::
>>> def letter_grade(x):
... return 'ABCDEF'[int(round(x))-1]
>>> print_table(grades, format=letter_grade)
Math English History Comment
Bob A B F Failed at history
Jane B A A Quite good
Trevor B D C Somewhat average
The function ``letter_grade()`` throws a ``TypeError`` when applied to for instance "Bob", so ``print_table()`` will not use it for "Bob". Likewise for format strings; when using them on some cell content would result in an exception, ``print_table()`` resorts to using ``str()`` on it.
Following a pattern similar to ``align``, different format strings/functions can be applied to different columns by putting them in a list. The last specified format string/function will be repeated for all subsequent columns. One can also specify different format strings/functions for different *rows*. In that case the lists are nested; a list with one list for each row. For example, to uppercase the header row::
>>> def str_upper(s):
... return s.upper()
>>> print_table(grades, format=[[str_upper],[letter_grade]])
MATH ENGLISH HISTORY COMMENT
Bob A B F Failed at history
Jane B A A Quite good
Trevor B D C Somewhat average
Using the ``format`` option is not the only, and not always the best way to format the cell contents. Sometimes it may be just as good to format the cell contents before passing it to ``*_table()``, like in this example::
>>> measurements = \
... [[0.0, 0.16159999923218293, 0.05832942704771176],
... [0.001, 0.5415871693699631, 0.1038533048639953],
... [0.002, 1.0020586304683154, 0.06263011126285473],
... [0.003, 1.6493888138044273, 0.1633588946456795],
... [0.004, 2.158470579371153, 0.16602352409683588],
... [0.005, 2.543489191597334, 0.18539040280004443],
... [0.006, 3.1235687589204497, 0.24946423631204423],
... [0.007, 3.6155358393212573, 0.19856685230794482],
... [0.008, 4.111913772930216, 0.19223623526732384],
... [0.009000000000000001, 4.505017235628538, 0.20666111673691043],
... [0.01, 5.0961076665212595, 0.1259131288654157]]
>>> for row in measurements:
... row[0] = '{:.1f}ms'.format(row[0]*1e3)
... row[1] = '{:.1f}V'.format(row[1])
... row[2] = '{:.0f}mA'.format(row[2]*1e3)
>>> header = ['Time', 'Voltage', 'Current']
>>> measurements.insert(0, header)
>>> print_table(measurements, '>')
Time Voltage Current
0.0ms 0.2V 58mA
1.0ms 0.5V 104mA
2.0ms 1.0V 63mA
3.0ms 1.6V 163mA
4.0ms 2.2V 166mA
5.0ms 2.5V 185mA
6.0ms 3.1V 249mA
7.0ms 3.6V 199mA
8.0ms 4.1V 192mA
9.0ms 4.5V 207mA
10.0ms 5.1V 126mA
.. _`format string`: https://docs.python.org/3.7/library/string.html#format-string-syntax
Width and spacing
~~~~~~~~~~~~~~~~~
The ``colwidth`` parameter can be used to change column widths, which by default is just big enough to fit the contents. Setting it to ``10``, for instance, means that all columns are 10 characters wide. Setting it to ``[20, 10]`` means that the first column is 20 characters wide and the subsequent ones are 10. Unless all columns are specified, the last specified width is repeated for the remaining columns.
Content that is too long for its cell is truncated using the string ``suffix`` (default: ``'...'``). Example::
>>> print_table(grades, colwidth=10)
Math English History Comment
Bob 1.2 2.1 5.9 Failed ...
Jane 2.4 1.1 1.4 Quite good
Trevor 2.2 4.4 3.2 Somewha...
The spacing between the columns is ``spacing`` characters (default: ``2``).
If the total table width exceeds ``maxwidth`` the column indicated by ``truncate`` (default: ``0``) is truncated on rows that are too long. If ``maxwidth`` is not specified it will be taken as the terminal width minus 1. This truncation overrides settings in ``colwidth``.
Beware that no columns can have zero or negative width. If for instance ``maxwidth`` is 80 and ``colwidth`` is ``[10, 30, 30, 30]`` with spacing 2 the total width will initially be 10+2+30+2+30+2+30=106. That's 26 characters too much, so a width of 26 will be removed from the truncated column. If ``truncate`` is 0, column 0 will have a width of -16 which is not permitted.
Example: Sorting a Table
~~~~~~~~~~~~~~~~~~~~~~~~
Consider printing sorted table of the race times of a 10km run. The race times in seconds is already in a table, and we supply a separate header row::
>>> from frmt import format_time
>>> header = ['Name' , 'Time']
>>> race = [['John' , 3672 ],
... ['Martha', 2879 ],
... ['Stuart', 2934 ],
... ['Eduard', 2592 ]]
>>> race.sort(key=lambda row: row[1])
>>> race.insert(0, header)
>>> print_table(race, '<>', format_time)
Name Time
Eduard 43:12
Martha 47:59
Stuart 48:54
John 1:01:12
Example: Transposing a Table
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A table can be transposed using ``zip`` along with the ``*`` operator::
>>> print_table(zip(*grades))
Bob Jane Trevor
Math 1.2 2.4 2.2
English 2.1 1.1 4.4
History 5.9 1.4 3.2
Comment Failed at history Quite good Somewhat average
``zip(*grades)``, which is the equivalent of ``zip(grades[0], grades[1], grades[2], grades[3])``, isn't actually a list of lists. It is nonetheless an iterable of an iterable, and therefore perfectly understandable by ``*_table()``.
If you still want a list of list, e.g. for preprocessing the table, you could do ``list(map(list,zip(*grades)))``.
A common pattern is having a set of lists (or 1D NumPy arrays) and wanting to print them as columns. Here's an example of that::
>>> time = [0.0, 0.001, 0.002, 0.003, 0.004, 0.005,
... 0.006, 0.007, 0.008, 0.009, 0.01]
>>> voltage = [0.16159999923218293, 0.5415871693699631, 1.0020586304683154,
... 1.6493888138044273, 2.158470579371153, 2.543489191597334,
... 3.1235687589204497, 3.6155358393212573, 4.111913772930216,
... 4.505017235628538, 5.0961076665212595]
>>> current = [0.05832942704771176, 0.1038533048639953, 0.06263011126285473,
... 0.1633588946456795, 0.16602352409683588, 0.18539040280004443,
... 0.24946423631204423, 0.19856685230794482,
... 0.19223623526732384, 0.20666111673691043, 0.1259131288654157]
>>> header = ['Time', 'Voltage', 'Current']
>>> measurements = list(zip(time, voltage, current))
>>> measurements.insert(0, header)
>>> print_table(measurements, '>', '{:.3f}')
Time Voltage Current
0.000 0.162 0.058
0.001 0.542 0.104
0.002 1.002 0.063
0.003 1.649 0.163
0.004 2.158 0.166
0.005 2.543 0.185
0.006 3.124 0.249
0.007 3.616 0.199
0.008 4.112 0.192
0.009 4.505 0.207
0.010 5.096 0.126
``*_time*()``
-------------
Signature: ``*_time*(seconds)``
``*_time()`` represents time given in seconds using the format ``dd:hh:mm:ss`` when ``abs(seconds) >= 60`` and using SI-prefixes and three significant figures otherwise. This gives a convenient resolution for the widest range of magnitudes. ``*_time_large()`` always uses the former format and ``*_time_small()`` always uses the latter. Rounding is taken care of. Examples::
>>> from frmt import print_time, print_time_small, print_time_large
>>> print_time(24*60*60)
1:00:00:00
>>> print_time(90)
1:30
>>> print_time(30)
30.0s
>>> print_time(0.01255)
12.6ms
>>> print_time_small(90)
90.0s
>>> print_time_large(30)
30
>>> print_time(float('nan'))
-
``*_fit()``
--------------
Signature: ``*_fit(text, width=None, align='<', suffix="...")``
``*_fit()`` fits a piece of text to ``width`` characters by truncating too long text and padding too short text with spaces. Truncation is indicated by a customizable suffix ``suffix`` (default: ``'...'``). Examples::
>>> from frmt import format_fit
>>> format_fit('abcdefgh', 6) == 'abc...' # truncation
True
>>> format_fit('abcdefgh', 6, suffix='!') == 'abcde!' # truncation
True
>>> format_fit('abc', 6) == 'abc ' # padding
True
The contents can be left, centered or right aligned by setting ``align`` to ``'<'``, ``'^'`` or ``'>'``, respectively::
>>> format_fit('abc', 6, '^') == ' abc '
True
>>> format_fit('abc', 6, '>') == ' abc'
True
If ``width`` is not specified it is taken to be the terminal width. Hence ``print_fit(s)`` is equivalent to ``print(s)`` except that ``s`` will be truncated such as to not spill over to the next line in the terminal.
| PypiClean |
/MCREPOGEN-0.3.tar.gz/MCREPOGEN-0.3/mcrepogen/configobj.py |
# ConfigObj 4
# http://www.voidspace.org.uk/python/configobj.html
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# For information about bugfixes, updates and support, please join the
# ConfigObj mailing list:
# http://lists.sourceforge.net/lists/listinfo/configobj-develop
# Comments, suggestions and bug reports welcome.
from __future__ import generators
import sys
INTP_VER = sys.version_info[:2]
if INTP_VER < (2, 2):
raise RuntimeError("Python v.2.2 or later needed")
import os, re
compiler = None
try:
import compiler
except ImportError:
# for IronPython
pass
from types import StringTypes
from warnings import warn
try:
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
except ImportError:
# Python 2.2 does not have these
# UTF-8
BOM_UTF8 = '\xef\xbb\xbf'
# UTF-16, little endian
BOM_UTF16_LE = '\xff\xfe'
# UTF-16, big endian
BOM_UTF16_BE = '\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM_UTF16 = BOM_UTF16_LE
else:
# UTF-16, native endianness
BOM_UTF16 = BOM_UTF16_BE
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf_16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
def match_utf8(encoding):
    """Return True when *encoding* is one of the known UTF-8 aliases."""
    normalised = encoding.lower()
    return BOM_LIST.get(normalised) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
wspace_plus = ' \r\t\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
try:
enumerate
except NameError:
def enumerate(obj):
"""enumerate for Python 2.2."""
i = -1
for item in obj:
i += 1
yield i, item
try:
True, False
except NameError:
True, False = 1, 0
__version__ = '4.5.2'
__revision__ = '$Id: configobj.py 156 2006-01-31 14:57:08Z fuzzyman $'
__docformat__ = "restructuredtext en"
__all__ = (
'__version__',
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'__docformat__',
'flatten_errors',
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
def getObj(s):
    """Parse *s* as the right-hand side of an assignment and return the
    AST node for that expression (requires the Python 2 ``compiler``
    module)."""
    source = "a=" + s
    if compiler is None:
        raise ImportError('compiler module not available')
    tree = compiler.parse(source)
    return tree.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
    """Raised by ``Builder`` for an AST node it does not know how to build."""
class Builder(object):
    """Rebuild Python values from a ``compiler`` parse tree.

    Dispatches on the node's class name (``build_<NodeName>``) and raises
    ``UnknownType`` for anything it cannot rebuild.  Used by ``unrepr``
    to evaluate repr-style strings without calling ``eval``.
    """

    def build(self, o):
        # Dispatch to build_<ClassName>; reject unknown node types.
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)

    def build_List(self, o):
        # Python 2 map returns a list here.
        return map(self.build, o.getChildren())

    def build_Const(self, o):
        return o.value

    def build_Dict(self, o):
        d = {}
        # Children alternate key, value, key, value, ...
        i = iter(map(self.build, o.getChildren()))
        for el in i:
            d[el] = i.next()
        return d

    def build_Tuple(self, o):
        return tuple(self.build_List(o))

    def build_Name(self, o):
        # Only the three builtin constants are accepted as bare names.
        if o.name == 'None':
            return None
        if o.name == 'True':
            return True
        if o.name == 'False':
            return False

        # An undefined Name
        raise UnknownType('Undefined Name')

    def build_Add(self, o):
        # Complex literals parse as an Add of a real and an imaginary part.
        real, imag = map(self.build_Const, o.getChildren())
        try:
            real = float(real)
        except TypeError:
            raise UnknownType('Add')
        if not isinstance(imag, complex) or imag.real != 0.0:
            raise UnknownType('Add')
        return real+imag

    def build_Getattr(self, o):
        # e.g. module.attribute chains
        parent = self.build(o.expr)
        return getattr(parent, o.attrname)

    def build_UnarySub(self, o):
        return -self.build_Const(o.getChildren()[0])

    def build_UnaryAdd(self, o):
        return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
    """Inverse of ``repr`` for simple literals: parse *s* into a value.

    Falsy input (e.g. the empty string) is returned unchanged.
    """
    if s:
        return _builder.build(getObj(s))
    return s
class ConfigObjError(SyntaxError):
    """
    Base class for all errors that ConfigObj raises.

    Subclasses ``SyntaxError`` and records the offending line, its
    number, and the error message.
    """
    def __init__(self, message='', line_number=None, line=''):
        SyntaxError.__init__(self, message)
        self.message = message
        self.line = line
        self.line_number = line_number
class NestingError(ConfigObjError):
    """
    This error indicates a level of nesting that doesn't match.

    Subclass of ``ConfigObjError``.
    """
class ParseError(ConfigObjError):
    """
    This error indicates that a line is badly written.
    It is neither a valid ``key = value`` line,
    nor a valid section marker line.

    Subclass of ``ConfigObjError``.
    """
class ReloadError(IOError):
    """
    A 'reload' operation failed.
    This exception is a subclass of ``IOError``.
    """
    def __init__(self):
        # Fixed message: reload only makes sense when a filename is set.
        super(ReloadError, self).__init__(
            'reload failed, filename is not set.')
class DuplicateError(ConfigObjError):
    """
    The keyword or section specified already exists.

    Subclass of ``ConfigObjError``.
    """
class ConfigspecError(ConfigObjError):
    """
    An error occured whilst parsing a configspec.

    Subclass of ``ConfigObjError``.
    """
class InterpolationError(ConfigObjError):
    """Base class for the two interpolation errors
    (``InterpolationLoopError`` and ``MissingInterpolationOption``)."""
class InterpolationLoopError(InterpolationError):
    """Maximum interpolation depth exceeded in string interpolation."""

    def __init__(self, option):
        msg = 'interpolation loop detected in value "%s".' % option
        InterpolationError.__init__(self, msg)
class RepeatSectionError(ConfigObjError):
    """
    This error indicates additional sections in a section with a
    ``__many__`` (repeated) section.

    Subclass of ``ConfigObjError``.
    """
class MissingInterpolationOption(InterpolationError):
    """A value specified for interpolation was missing."""

    def __init__(self, option):
        msg = 'missing option "%s" in interpolation.' % option
        InterpolationError.__init__(self, msg)
class UnreprError(ConfigObjError):
    """An error parsing in unrepr mode (subclass of ``ConfigObjError``)."""
class InterpolationEngine(object):
    """
    A helper class to help perform string interpolation.

    This class is an abstract base class; its descendants perform
    the actual work.  Subclasses supply ``_KEYCRE`` (the pattern that
    finds interpolation references) and ``_parse_match`` (which resolves
    one match to a key/value/section triple).
    """
    # compiled regexp to use in self.interpolate()
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")

    def __init__(self, section):
        # the Section instance that "owns" this engine
        self.section = section

    def interpolate(self, key, value):
        # Fully interpolate ``value`` (the raw string found under ``key``),
        # recursing into fetched values until nothing is left to substitute.
        def recursive_interpolate(key, value, section, backtrail):
            """The function that does the actual work.

            ``value``: the string we're trying to interpolate.
            ``section``: the section in which that string was found
            ``backtrail``: a dict to keep track of where we've been,
            to detect and prevent infinite recursion loops

            This is similar to a depth-first-search algorithm.
            """
            # Have we been here already?
            if backtrail.has_key((key, section.name)):
                # Yes - infinite loop detected
                raise InterpolationLoopError(key)
            # Place a marker on our backtrail so we won't come back here again
            backtrail[(key, section.name)] = 1

            # Now start the actual work
            match = self._KEYCRE.search(value)
            while match:
                # The actual parsing of the match is implementation-dependent,
                # so delegate to our helper function
                k, v, s = self._parse_match(match)
                if k is None:
                    # That's the signal that no further interpolation is needed
                    replacement = v
                else:
                    # Further interpolation may be needed to obtain final value
                    replacement = recursive_interpolate(k, v, s, backtrail)
                # Replace the matched string with its final value
                start, end = match.span()
                value = ''.join((value[:start], replacement, value[end:]))
                new_search_start = start + len(replacement)
                # Pick up the next interpolation key, if any, for next time
                # through the while loop
                match = self._KEYCRE.search(value, new_search_start)
            # Now safe to come back here again; remove marker from backtrail
            del backtrail[(key, section.name)]

            return value

        # Back in interpolate(), all we have to do is kick off the recursive
        # function with appropriate starting values
        value = recursive_interpolate(key, value, self.section, {})
        return value

    def _fetch(self, key):
        """Helper function to fetch values from owning section.

        Returns a 2-tuple: the value, and the section where it was found.
        Raises ``MissingInterpolationOption`` when the key is nowhere to
        be found.
        """
        # switch off interpolation before we try and fetch anything !
        save_interp = self.section.main.interpolation
        self.section.main.interpolation = False

        # Start at section that "owns" this InterpolationEngine
        current_section = self.section
        while True:
            # try the current section first
            val = current_section.get(key)
            if val is not None:
                break
            # try "DEFAULT" next
            val = current_section.get('DEFAULT', {}).get(key)
            if val is not None:
                break
            # move up to parent and try again
            # top-level's parent is itself
            if current_section.parent is current_section:
                # reached top level, time to give up
                break
            current_section = current_section.parent

        # restore interpolation to previous value before returning
        self.section.main.interpolation = save_interp
        if val is None:
            raise MissingInterpolationOption(key)
        return val, current_section

    def _parse_match(self, match):
        """Implementation-dependent helper function.

        Will be passed a match object corresponding to the interpolation
        key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
        key in the appropriate config file section (using the ``_fetch()``
        helper function) and return a 3-tuple: (key, value, section)

        ``key`` is the name of the key we're looking for
        ``value`` is the value found for that key
        ``section`` is a reference to the section where it was found

        ``key`` and ``section`` should be None if no further
        interpolation should be performed on the resulting value
        (e.g., if we interpolated "$$" and returned "$").
        """
        raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
    """Behaves like ConfigParser: references are written ``%(name)s``."""
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")

    def _parse_match(self, match):
        # Every match needs a lookup; interpolation continues on the
        # fetched value, so the key is always returned.
        lookup_key = match.group(1)
        fetched_value, owning_section = self._fetch(lookup_key)
        return lookup_key, fetched_value, owning_section
class TemplateInterpolation(InterpolationEngine):
    """Behaves like string.Template: ``$name``, ``${name}`` and ``$$``."""
    _delimiter = '$'
    _KEYCRE = re.compile(r"""
        \$(?:
            (?P<escaped>\$) | # Two $ signs
            (?P<named>[_a-z][_a-z0-9]*) | # $name format
            {(?P<braced>[^}]*)} # ${name} format
        )
        """, re.IGNORECASE | re.VERBOSE)

    def _parse_match(self, match):
        # A real name (bare or braced) is fetched from the section and may
        # itself be interpolated further.
        name = match.group('named') or match.group('braced')
        if name is not None:
            value, section = self._fetch(name)
            return name, value, section
        # '$$' collapses to a single literal delimiter; None key stops
        # further interpolation of the replacement.
        if match.group('escaped') is not None:
            return None, self._delimiter, None
        # Anything else is passed through unchanged.
        return None, match.group(), None
interpolation_engines = {
'configparser': ConfigParserInterpolation,
'template': TemplateInterpolation,
}
class Section(dict):
"""
A dictionary-like object that represents a section in a config file.
It does string interpolation if the 'interpolation' attribute
of the 'main' object is set to True.
Interpolation is tried first from this object, then from the 'DEFAULT'
section of this object, next from the parent and its 'DEFAULT' section,
and so on until the main object is reached.
A Section will behave like an ordered dictionary - following the
order of the ``scalars`` and ``sections`` attributes.
You can use this to change the order of members.
Iteration follows the order: scalars, then sections.
"""
    def __init__(self, parent, depth, main, indict=None, name=None):
        """
        * parent is the section above
        * depth is the depth level of this section
        * main is the main ConfigObj
        * indict is a dictionary to initialise the section with
        * name is this section's key in the parent (purely informational)
        """
        if indict is None:
            indict = {}
        dict.__init__(self)
        # used for nesting level *and* interpolation
        self.parent = parent
        # used for the interpolation attribute
        self.main = main
        # level of nesting depth of this Section
        self.depth = depth
        # purely for information
        self.name = name
        #
        self._initialise()
        # we do this explicitly so that __setitem__ is used properly
        # (rather than just passing to ``dict.__init__``)
        for entry, value in indict.iteritems():
            self[entry] = value
    def _initialise(self):
        """Reset all ordering, comment and configspec bookkeeping to empty."""
        # the sequence of scalar values in this Section
        self.scalars = []
        # the sequence of sections in this Section
        self.sections = []
        # for comments :-)
        self.comments = {}
        self.inline_comments = {}
        # for the configspec
        self.configspec = {}
        self._order = []
        self._configspec_comments = {}
        self._configspec_inline_comments = {}
        self._cs_section_comments = {}
        self._cs_section_inline_comments = {}
        # for defaults
        self.defaults = []
        self.default_values = {}
    def _interpolate(self, key, value):
        """Run *value* through the configured interpolation engine.

        The engine is created lazily on first use and cached on the
        instance.  An unrecognised engine name disables interpolation
        for the whole ConfigObj and returns the value unchanged.
        """
        try:
            # do we already have an interpolation engine?
            engine = self._interpolation_engine
        except AttributeError:
            # not yet: first time running _interpolate(), so pick the engine
            name = self.main.interpolation
            if name == True:  # note that "if name:" would be incorrect here
                # backwards-compatibility: interpolation=True means use default
                name = DEFAULT_INTERPOLATION
            name = name.lower()  # so that "Template", "template", etc. all work
            class_ = interpolation_engines.get(name, None)
            if class_ is None:
                # invalid value for self.main.interpolation
                self.main.interpolation = False
                return value
            else:
                # save reference to engine so we don't have to do this again
                engine = self._interpolation_engine = class_(self)
        # let the engine do the actual work
        return engine.interpolate(key, value)
def __getitem__(self, key):
"""Fetch the item and do string interpolation."""
val = dict.__getitem__(self, key)
if self.main.interpolation and isinstance(val, StringTypes):
return self._interpolate(key, val)
return val
    def __setitem__(self, key, value, unrepr=False):
        """
        Correctly set a value.

        Making dictionary values Section instances.
        (We have to special case 'Section' instances - which are also dicts)

        Keys must be strings.
        Values need only be strings (or lists of strings) if
        ``main.stringify`` is set.

        ``unrepr`` must be set when setting a value to a dictionary, without
        creating a new sub-section.
        """
        if not isinstance(key, StringTypes):
            raise ValueError('The key "%s" is not a string.' % key)
        # add the comment
        if not self.comments.has_key(key):
            self.comments[key] = []
            self.inline_comments[key] = ''
        # remove the entry from defaults
        if key in self.defaults:
            self.defaults.remove(key)
        #
        if isinstance(value, Section):
            # an existing Section is stored as-is
            if not self.has_key(key):
                self.sections.append(key)
            dict.__setitem__(self, key, value)
        elif isinstance(value, dict) and not unrepr:
            # a plain dict becomes a new subsection (recursively)
            # First create the new depth level,
            # then create the section
            if not self.has_key(key):
                self.sections.append(key)
            new_depth = self.depth + 1
            dict.__setitem__(
                self,
                key,
                Section(
                    self,
                    new_depth,
                    self.main,
                    indict=value,
                    name=key))
        else:
            # a scalar entry
            if not self.has_key(key):
                self.scalars.append(key)
            if not self.main.stringify:
                # stringify off: only strings (or lists/tuples of strings) allowed
                if isinstance(value, StringTypes):
                    pass
                elif isinstance(value, (list, tuple)):
                    for entry in value:
                        if not isinstance(entry, StringTypes):
                            raise TypeError('Value is not a string "%s".' % entry)
                else:
                    raise TypeError('Value is not a string "%s".' % value)
            dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Remove items from the sequence when deleting."""
dict. __delitem__(self, key)
if key in self.scalars:
self.scalars.remove(key)
else:
self.sections.remove(key)
del self.comments[key]
del self.inline_comments[key]
def get(self, key, default=None):
"""A version of ``get`` that doesn't bypass string interpolation."""
try:
return self[key]
except KeyError:
return default
def update(self, indict):
"""
A version of update that uses our ``__setitem__``.
"""
for entry in indict:
self[entry] = indict[entry]
def pop(self, key, *args):
"""
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised'
"""
val = dict.pop(self, key, *args)
if key in self.scalars:
del self.comments[key]
del self.inline_comments[key]
self.scalars.remove(key)
elif key in self.sections:
del self.comments[key]
del self.inline_comments[key]
self.sections.remove(key)
if self.main.interpolation and isinstance(val, StringTypes):
return self._interpolate(key, val)
return val
def popitem(self):
"""Pops the first (key,val)"""
sequence = (self.scalars + self.sections)
if not sequence:
raise KeyError(": 'popitem(): dictionary is empty'")
key = sequence[0]
val = self[key]
del self[key]
return key, val
def clear(self):
"""
A version of clear that also affects scalars/sections
Also clears comments and configspec.
Leaves other attributes alone :
depth/main/parent are not affected
"""
dict.clear(self)
self.scalars = []
self.sections = []
self.comments = {}
self.inline_comments = {}
self.configspec = {}
def setdefault(self, key, default=None):
"""A version of setdefault that sets sequence if appropriate."""
try:
return self[key]
except KeyError:
self[key] = default
return self[key]
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return zip((self.scalars + self.sections), self.values())
def keys(self):
"""D.keys() -> list of D's keys"""
return (self.scalars + self.sections)
def values(self):
"""D.values() -> list of D's values"""
return [self[key] for key in (self.scalars + self.sections)]
def iteritems(self):
"""D.iteritems() -> an iterator over the (key, value) items of D"""
return iter(self.items())
def iterkeys(self):
"""D.iterkeys() -> an iterator over the keys of D"""
return iter((self.scalars + self.sections))
__iter__ = iterkeys
def itervalues(self):
"""D.itervalues() -> an iterator over the values of D"""
return iter(self.values())
def __repr__(self):
"""x.__repr__() <==> repr(x)"""
return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(self[key])))
for key in (self.scalars + self.sections)])
__str__ = __repr__
__str__.__doc__ = "x.__str__() <==> str(x)"
# Extra methods - not in a normal dictionary
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict()
>>> n == a
1
>>> n is a
0
"""
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict
def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
{'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}
"""
for key, val in indict.items():
if (key in self and isinstance(self[key], dict) and
isinstance(val, dict)):
self[key].merge(val)
else:
self[key] = val
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
    def walk(self, function, raise_errors=True,
            call_on_sections=False, **keywargs):
        """
        Walk every member and call a function on the keyword and value.

        Return a dictionary of the return values

        If the function raises an exception, raise the error
        unless ``raise_errors=False``, in which case set the return value to
        ``False``.

        Any unrecognised keyword arguments you pass to walk, will be passed on
        to the function you pass in.

        Note: if ``call_on_sections`` is ``True`` then - on encountering a
        subsection, *first* the function is called for the *whole* subsection,
        and then recurses into it's members. This means your function must be
        able to handle strings, dictionaries and lists. This allows you
        to change the key of subsections as well as for ordinary members. The
        return value when called on the whole subsection has to be discarded.

        See the encode and decode methods for examples, including functions.

        .. caution::

            You can use ``walk`` to transform the names of members of a section
            but you mustn't add or delete members.

        >>> config = '''[XXXXsection]
        ... XXXXkey = XXXXvalue'''.splitlines()
        >>> cfg = ConfigObj(config)
        >>> cfg
        {'XXXXsection': {'XXXXkey': 'XXXXvalue'}}
        >>> def transform(section, key):
        ...     val = section[key]
        ...     newkey = key.replace('XXXX', 'CLIENT1')
        ...     section.rename(key, newkey)
        ...     if isinstance(val, (tuple, list, dict)):
        ...         pass
        ...     else:
        ...         val = val.replace('XXXX', 'CLIENT1')
        ...         section[newkey] = val
        >>> cfg.walk(transform, call_on_sections=True)
        {'CLIENT1section': {'CLIENT1key': None}}
        >>> cfg
        {'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}
        """
        out = {}
        # scalars first; iterate by index because ``function`` may rename
        # the entry in place (via ``rename``), so it is re-read after the call
        for i in range(len(self.scalars)):
            entry = self.scalars[i]
            try:
                val = function(self, entry, **keywargs)
                # bound again in case name has changed
                entry = self.scalars[i]
                out[entry] = val
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.scalars[i]
                    out[entry] = False
        # then sections
        for i in range(len(self.sections)):
            entry = self.sections[i]
            if call_on_sections:
                try:
                    function(self, entry, **keywargs)
                except Exception:
                    if raise_errors:
                        raise
                    else:
                        entry = self.sections[i]
                        out[entry] = False
                # bound again in case name has changed
                entry = self.sections[i]
            # previous result is discarded
            out[entry] = self[entry].walk(
                function,
                raise_errors=raise_errors,
                call_on_sections=call_on_sections,
                **keywargs)
        return out
    def decode(self, encoding):
        """
        Decode all strings and values to unicode, using the specified encoding.

        Works with subsections and list values.

        Uses the ``walk`` method.

        Testing ``encode`` and ``decode``.
        >>> m = ConfigObj(a)
        >>> m.decode('ascii')
        >>> def testuni(val):
        ...     for entry in val:
        ...         if not isinstance(entry, unicode):
        ...             print >> sys.stderr, type(entry)
        ...             raise AssertionError, 'decode failed.'
        ...         if isinstance(val[entry], dict):
        ...             testuni(val[entry])
        ...         elif not isinstance(val[entry], unicode):
        ...             raise AssertionError, 'decode failed.'
        >>> testuni(m)
        >>> m.encode('ascii')
        >>> a == m
        1
        """
        warn('use of ``decode`` is deprecated.', DeprecationWarning)
        # transformation applied to every key/value pair by ``walk`` below;
        # NOTE(review): the ``warn`` parameter shadows the module-level
        # ``warn`` and appears unused - presumably historical; confirm
        def decode(section, key, encoding=encoding, warn=True):
            """ """
            val = section[key]
            if isinstance(val, (list, tuple)):
                # decode each member of a list value
                newval = []
                for entry in val:
                    newval.append(entry.decode(encoding))
            elif isinstance(val, dict):
                # subsections are handled by the recursive walk itself
                newval = val
            else:
                newval = val.decode(encoding)
            # keys are decoded too, via rename
            newkey = key.decode(encoding)
            section.rename(key, newkey)
            section[newkey] = newval
        # using ``call_on_sections`` allows us to modify section names
        self.walk(decode, call_on_sections=True)
    def encode(self, encoding):
        """
        Encode all strings and values from unicode,
        using the specified encoding.

        Works with subsections and list values.
        Uses the ``walk`` method.
        """
        warn('use of ``encode`` is deprecated.', DeprecationWarning)
        # transformation applied to every key/value pair by ``walk`` below
        def encode(section, key, encoding=encoding):
            """ """
            val = section[key]
            if isinstance(val, (list, tuple)):
                # encode each member of a list value
                newval = []
                for entry in val:
                    newval.append(entry.encode(encoding))
            elif isinstance(val, dict):
                # subsections are handled by the recursive walk itself
                newval = val
            else:
                newval = val.encode(encoding)
            # keys are encoded too, via rename
            newkey = key.encode(encoding)
            section.rename(key, newkey)
            section[newkey] = newval
        self.walk(encode, call_on_sections=True)
def istrue(self, key):
"""A deprecated version of ``as_bool``."""
warn('use of ``istrue`` is deprecated. Use ``as_bool`` method '
'instead.', DeprecationWarning)
return self.as_bool(key)
def as_bool(self, key):
"""
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
"""
val = self[key]
if val == True:
return True
elif val == False:
return False
else:
try:
if not isinstance(val, StringTypes):
# TODO: Why do we raise a KeyError here?
raise KeyError()
else:
return self.main._bools[val.lower()]
except KeyError:
raise ValueError('Value "%s" is neither True nor False' % val)
def as_int(self, key):
"""
A convenience method which coerces the specified value to an integer.
If the value is an invalid literal for ``int``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_int('a')
Traceback (most recent call last):
ValueError: invalid literal for int(): fish
>>> a['b'] = '1'
>>> a.as_int('b')
1
>>> a['b'] = '3.2'
>>> a.as_int('b')
Traceback (most recent call last):
ValueError: invalid literal for int(): 3.2
"""
return int(self[key])
def as_float(self, key):
"""
A convenience method which coerces the specified value to a float.
If the value is an invalid literal for ``float``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_float('a')
Traceback (most recent call last):
ValueError: invalid literal for float(): fish
>>> a['b'] = '1'
>>> a.as_float('b')
1.0
>>> a['b'] = '3.2'
>>> a.as_float('b')
3.2000000000000002
"""
return float(self[key])
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
class ConfigObj(Section):
    """An object to read, create, and write config files."""
    # Class-level compiled regexes and lookup tables, shared by all instances.
    # matches a ``key = value`` line; groups: indentation, keyword, raw value
    _keyword = re.compile(r'''^ # line start
        (\s*) # indentation
        ( # keyword
            (?:".*?")| # double quotes
            (?:'.*?')| # single quotes
            (?:[^'"=].*?) # no quotes
        )
        \s*=\s* # divider
        (.*) # value (including list values and comments)
        $ # line end
        ''',
        re.VERBOSE)
    # matches a ``[[section]]`` marker line; nesting depth comes from
    # counting the brackets in groups 2 and 4
    _sectionmarker = re.compile(r'''^
        (\s*) # 1: indentation
        ((?:\[\s*)+) # 2: section marker open
        ( # 3: section name open
            (?:"\s*\S.*?\s*")| # at least one non-space with double quotes
            (?:'\s*\S.*?\s*')| # at least one non-space with single quotes
            (?:[^'"\s].*?) # at least one non-space unquoted
        ) # section name close
        ((?:\s*\])+) # 4: section marker close
        \s*(\#.*)? # 5: optional comment
        $''',
        re.VERBOSE)
    # this regexp pulls list values out as a single string
    # or single values and comments
    # FIXME: this regex adds a '' to the end of comma terminated lists
    # workaround in ``_handle_value``
    _valueexp = re.compile(r'''^
        (?:
            (?:
                (
                    (?:
                        (?:
                            (?:".*?")| # double quotes
                            (?:'.*?')| # single quotes
                            (?:[^'",\#][^,\#]*?) # unquoted
                        )
                        \s*,\s* # comma
                    )* # match all list items ending in a comma (if any)
                )
                (
                    (?:".*?")| # double quotes
                    (?:'.*?')| # single quotes
                    (?:[^'",\#\s][^,]*?)| # unquoted
                    (?:(?<!,)) # Empty value
                )? # last item in a list - or string value
            )|
            (,) # alternatively a single comma - empty list
        )
        \s*(\#.*)? # optional comment
        $''',
        re.VERBOSE)
    # use findall to get the members of a list value
    _listvalueexp = re.compile(r'''
        (
            (?:".*?")| # double quotes
            (?:'.*?')| # single quotes
            (?:[^'",\#].*?) # unquoted
        )
        \s*,\s* # comma
        ''',
        re.VERBOSE)
    # this regexp is used for the value
    # when lists are switched off
    _nolistvalue = re.compile(r'''^
        (
            (?:".*?")| # double quotes
            (?:'.*?')| # single quotes
            (?:[^'"\#].*?)| # unquoted
            (?:) # Empty value
        )
        \s*(\#.*)? # optional comment
        $''',
        re.VERBOSE)
    # regexes for finding triple quoted values on one line
    _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
    _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
    _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
    _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
    # maps an opening triple quote to its (single line, multi line) regexes
    _triple_quote = {
        "'''": (_single_line_single, _multi_line_single),
        '"""': (_single_line_double, _multi_line_double),
    }
    # Used by the ``istrue`` Section method
    _bools = {
        'yes': True, 'no': False,
        'on': True, 'off': False,
        '1': True, '0': False,
        'true': True, 'false': False,
        }
    def __init__(self, infile=None, options=None, **kwargs):
        """
        Parse a config file or create a config file object.

        ``ConfigObj(infile=None, options=None, **kwargs)``

        Unrecognised options raise ``TypeError``; keyword arguments take
        precedence over entries in the ``options`` dictionary.
        """
        # init the superclass: a ConfigObj is its own parent and main
        Section.__init__(self, self, 0, self)
        if infile is None:
            infile = []
        if options is None:
            options = {}
        else:
            # copy so the caller's dictionary is not mutated below
            options = dict(options)
        # keyword arguments take precedence over an options dictionary
        options.update(kwargs)
        defaults = OPTION_DEFAULTS.copy()
        # TODO: check the values too.
        for entry in options:
            if entry not in defaults:
                raise TypeError('Unrecognised option "%s".' % entry)
        # Add any explicit options to the defaults
        defaults.update(options)
        # attributes must be initialised before _load runs
        self._initialise(defaults)
        configspec = defaults['configspec']
        self._original_configspec = configspec
        self._load(infile, configspec)
    def _load(self, infile, configspec):
        """
        Load and parse ``infile``, which may be a filename, a list/tuple of
        lines, a dict (or ConfigObj) to copy, or a file-like object.

        Collected parse errors are raised once parsing has finished (the
        first one directly, or a summary ``ConfigObjError`` for several).
        """
        if isinstance(infile, StringTypes):
            # treat a string as a filename
            self.filename = infile
            if os.path.isfile(infile):
                h = open(infile, 'rb')
                infile = h.read() or []
                h.close()
            elif self.file_error:
                # raise an error if the file doesn't exist
                raise IOError('Config file not found: "%s".' % self.filename)
            else:
                # file doesn't already exist
                if self.create_empty:
                    # this is a good test that the filename specified
                    # isn't impossible - like on a non-existent device
                    h = open(infile, 'w')
                    h.write('')
                    h.close()
                infile = []
        elif isinstance(infile, (list, tuple)):
            infile = list(infile)
        elif isinstance(infile, dict):
            # initialise self
            # the Section class handles creating subsections
            if isinstance(infile, ConfigObj):
                # get a copy of our ConfigObj
                infile = infile.dict()
            for entry in infile:
                self[entry] = infile[entry]
            del self._errors
            if configspec is not None:
                self._handle_configspec(configspec)
            else:
                self.configspec = None
            # nothing to parse when initialised from a dict
            return
        elif hasattr(infile, 'read'):
            # This supports file like objects
            infile = infile.read() or []
            # needs splitting into lines - but needs doing *after* decoding
            # in case it's not an 8 bit encoding
        else:
            raise TypeError('infile must be a filename, file like object, or list of lines.')
        if infile:
            # don't do it for the empty ConfigObj
            infile = self._handle_bom(infile)
            # infile is now *always* a list
            #
            # Set the newlines attribute (first line ending it finds)
            # and strip trailing '\n' or '\r' from lines
            for line in infile:
                if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
                    continue
                for end in ('\r\n', '\n', '\r'):
                    if line.endswith(end):
                        self.newlines = end
                        break
                break
            infile = [line.rstrip('\r\n') for line in infile]
        self._parse(infile)
        # if we had any errors, now is the time to raise them
        if self._errors:
            info = "at line %s." % self._errors[0].line_number
            if len(self._errors) > 1:
                msg = "Parsing failed with several errors.\nFirst error %s" % info
                error = ConfigObjError(msg)
            else:
                error = self._errors[0]
            # set the errors attribute; it's a list of tuples:
            # (error_type, message, line_number)
            error.errors = self._errors
            # set the config attribute
            error.config = self
            raise error
        # delete private attributes
        del self._errors
        if configspec is None:
            self.configspec = None
        else:
            self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = {}
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
return ('ConfigObj({%s})' %
', '.join([('%s: %s' % (repr(key), repr(self[key])))
for key in (self.scalars + self.sections)]))
    def _handle_bom(self, infile):
        """
        Handle any BOM, and decode if necessary.

        If an encoding is specified, that *must* be used - but the BOM should
        still be removed (and the BOM attribute set).

        (If the encoding is wrongly specified, then a BOM for an alternative
        encoding won't be discovered or removed.)

        If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
        removed. The BOM attribute will be set. UTF16 will be decoded to
        unicode.

        NOTE: This method must not be called with an empty ``infile``.

        Specifying the *wrong* encoding is likely to cause a
        ``UnicodeDecodeError``.

        ``infile`` must always be returned as a list of lines, but may be
        passed in as a single string.
        """
        if ((self.encoding is not None) and
            (self.encoding.lower() not in BOM_LIST)):
            # No need to check for a BOM
            # the encoding specified doesn't have one
            # just decode
            return self._decode(infile, self.encoding)
        # only the first line can carry a BOM
        if isinstance(infile, (list, tuple)):
            line = infile[0]
        else:
            line = infile
        if self.encoding is not None:
            # encoding explicitly supplied
            # And it could have an associated BOM
            # TODO: if encoding is just UTF16 - we ought to check for both
            # TODO: big endian and little endian versions.
            enc = BOM_LIST[self.encoding.lower()]
            if enc == 'utf_16':
                # For UTF16 we try big endian and little endian
                for BOM, (encoding, final_encoding) in BOMS.items():
                    if not final_encoding:
                        # skip UTF8
                        continue
                    if infile.startswith(BOM):
                        ### BOM discovered
                        ##self.BOM = True
                        # Don't need to remove BOM
                        return self._decode(infile, encoding)
                # If we get this far, will *probably* raise a DecodeError
                # As it doesn't appear to start with a BOM
                return self._decode(infile, self.encoding)
            # Must be UTF8
            BOM = BOM_SET[enc]
            if not line.startswith(BOM):
                return self._decode(infile, self.encoding)
            newline = line[len(BOM):]
            # BOM removed
            if isinstance(infile, (list, tuple)):
                infile[0] = newline
            else:
                infile = newline
            self.BOM = True
            return self._decode(infile, self.encoding)
        # No encoding specified - so we need to check for UTF8/UTF16
        for BOM, (encoding, final_encoding) in BOMS.items():
            if not line.startswith(BOM):
                continue
            else:
                # BOM discovered
                self.encoding = final_encoding
                if not final_encoding:
                    self.BOM = True
                    # UTF8
                    # remove BOM
                    newline = line[len(BOM):]
                    if isinstance(infile, (list, tuple)):
                        infile[0] = newline
                    else:
                        infile = newline
                    # UTF8 - don't decode
                    if isinstance(infile, StringTypes):
                        return infile.splitlines(True)
                    else:
                        return infile
                # UTF16 - have to decode
                return self._decode(infile, encoding)
        # No BOM discovered and no encoding specified, just return
        if isinstance(infile, StringTypes):
            # infile read from a file will be a single string
            return infile.splitlines(True)
        return infile
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if self.encoding:
return aString.decode('ascii')
else:
return aString
    def _decode(self, infile, encoding):
        """
        Decode infile to unicode. Using the specified encoding.

        if is a string, it also needs converting to a list (of lines,
        keeping the line endings).
        """
        if isinstance(infile, StringTypes):
            # can't be unicode
            # NOTE: Could raise a ``UnicodeDecodeError``
            return infile.decode(encoding).splitlines(True)
        # a list is decoded in place, line by line
        for i, line in enumerate(infile):
            if not isinstance(line, unicode):
                # NOTE: The isinstance test here handles mixed lists of unicode/string
                # NOTE: But the decode will break on any non-string values
                # NOTE: Or could raise a ``UnicodeDecodeError``
                infile[i] = line.decode(encoding)
        return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if not self.encoding:
return line
if isinstance(line, str) and self.default_encoding:
return line.decode(self.default_encoding)
return line
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, StringTypes):
return str(value)
else:
return value
    def _parse(self, infile):
        """
        Actually parse the config file.

        ``infile`` is a list of (already decoded, ending-stripped) lines.
        Populates sections/values and records errors via ``_handle_error``.
        """
        # list values are never parsed in unrepr mode; restored at the end
        temp_list_values = self.list_values
        if self.unrepr:
            self.list_values = False
        comment_list = []
        done_start = False
        this_section = self
        maxline = len(infile) - 1
        cur_index = -1
        reset_comment = False
        while cur_index < maxline:
            if reset_comment:
                comment_list = []
            cur_index += 1
            line = infile[cur_index]
            sline = line.strip()
            # do we have anything on the line ?
            if not sline or sline.startswith('#'):
                # blank/comment lines accumulate for the next entry
                reset_comment = False
                comment_list.append(line)
                continue
            if not done_start:
                # preserve initial comment
                self.initial_comment = comment_list
                comment_list = []
                done_start = True
            reset_comment = True
            # first we check if it's a section marker
            mat = self._sectionmarker.match(line)
            if mat is not None:
                # is a section line
                (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
                if indent and (self.indent_type is None):
                    self.indent_type = indent
                # nesting depth = number of opening brackets
                cur_depth = sect_open.count('[')
                if cur_depth != sect_close.count(']'):
                    self._handle_error("Cannot compute the section depth at line %s.",
                                       NestingError, infile, cur_index)
                    continue
                if cur_depth < this_section.depth:
                    # the new section is dropping back to a previous level
                    try:
                        parent = self._match_depth(this_section,
                                                   cur_depth).parent
                    except SyntaxError:
                        self._handle_error("Cannot compute nesting level at line %s.",
                                           NestingError, infile, cur_index)
                        continue
                elif cur_depth == this_section.depth:
                    # the new section is a sibling of the current section
                    parent = this_section.parent
                elif cur_depth == this_section.depth + 1:
                    # the new section is a child the current section
                    parent = this_section
                else:
                    self._handle_error("Section too nested at line %s.",
                                       NestingError, infile, cur_index)
                sect_name = self._unquote(sect_name)
                if parent.has_key(sect_name):
                    self._handle_error('Duplicate section name at line %s.',
                                       DuplicateError, infile, cur_index)
                    continue
                # create the new section
                this_section = Section(
                    parent,
                    cur_depth,
                    self,
                    name=sect_name)
                parent[sect_name] = this_section
                parent.inline_comments[sect_name] = comment
                parent.comments[sect_name] = comment_list
                continue
            #
            # it's not a section marker,
            # so it should be a valid ``key = value`` line
            mat = self._keyword.match(line)
            if mat is None:
                # it neither matched as a keyword
                # or a section marker
                self._handle_error(
                    'Invalid line at line "%s".',
                    ParseError, infile, cur_index)
            else:
                # is a keyword value
                # value will include any inline comment
                (indent, key, value) = mat.groups()
                if indent and (self.indent_type is None):
                    self.indent_type = indent
                # check for a multiline value
                if value[:3] in ['"""', "'''"]:
                    try:
                        # may consume further lines; cur_index is advanced
                        (value, comment, cur_index) = self._multiline(
                            value, infile, cur_index, maxline)
                    except SyntaxError:
                        self._handle_error(
                            'Parse error in value at line %s.',
                            ParseError, infile, cur_index)
                        continue
                    else:
                        if self.unrepr:
                            comment = ''
                            try:
                                value = unrepr(value)
                            except Exception, e:
                                if type(e) == UnknownType:
                                    msg = 'Unknown name or type in value at line %s.'
                                else:
                                    msg = 'Parse error in value at line %s.'
                                self._handle_error(msg, UnreprError, infile,
                                    cur_index)
                                continue
                else:
                    if self.unrepr:
                        comment = ''
                        try:
                            value = unrepr(value)
                        except Exception, e:
                            if isinstance(e, UnknownType):
                                msg = 'Unknown name or type in value at line %s.'
                            else:
                                msg = 'Parse error in value at line %s.'
                            self._handle_error(msg, UnreprError, infile,
                                cur_index)
                            continue
                    else:
                        # extract comment and lists
                        try:
                            (value, comment) = self._handle_value(value)
                        except SyntaxError:
                            self._handle_error(
                                'Parse error in value at line %s.',
                                ParseError, infile, cur_index)
                            continue
                #
                key = self._unquote(key)
                if this_section.has_key(key):
                    self._handle_error(
                        'Duplicate keyword name at line %s.',
                        DuplicateError, infile, cur_index)
                    continue
                # add the key.
                # we set unrepr because if we have got this far we will never
                # be creating a new section
                this_section.__setitem__(key, value, unrepr=True)
                this_section.inline_comments[key] = comment
                this_section.comments[key] = comment_list
                continue
        #
        if self.indent_type is None:
            # no indentation used, set the type accordingly
            self.indent_type = ''
        # preserve the final comment
        if not self and not self.initial_comment:
            self.initial_comment = comment_list
        elif not reset_comment:
            self.final_comment = comment_list
        self.list_values = temp_list_values
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occured at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = text % cur_index
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
    def _quote(self, value, multiline=True):
        """
        Return a safely quoted version of a value.

        Raise a ConfigObjError if the value cannot be safely quoted.
        If multiline is ``True`` (default) then use triple quotes
        if necessary.

        Don't quote values that don't need it.
        Recursively quote members of a list and return a comma joined list.
        Multiline is ``False`` for lists.
        Obey list syntax for empty and single member lists.

        If ``list_values=False`` then the value is only quoted if it contains
        a ``\n`` (is multiline) or '#'.

        If ``write_empty_values`` is set, and the value is an empty string, it
        won't be quoted.
        """
        if multiline and self.write_empty_values and value == '':
            # Only if multiline is set, so that it is used for values not
            # keys, and not values that are part of a list
            return ''
        if multiline and isinstance(value, (list, tuple)):
            # list syntax: trailing comma for empty/single member lists
            if not value:
                return ','
            elif len(value) == 1:
                return self._quote(value[0], multiline=False) + ','
            return ', '.join([self._quote(val, multiline=False)
                for val in value])
        if not isinstance(value, StringTypes):
            if self.stringify:
                value = str(value)
            else:
                raise TypeError('Value "%s" is not a string.' % value)
        if not value:
            return '""'
        # decide which quoting style (if any) the value needs
        no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
        need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
        hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
        check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
        if check_for_single:
            if not self.list_values:
                # we don't quote if ``list_values=False``
                quot = noquot
            # for normal values either single or double quotes will do
            elif '\n' in value:
                # will only happen if multiline is off - e.g. '\n' in key
                raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
            elif ((value[0] not in wspace_plus) and
                  (value[-1] not in wspace_plus) and
                  (',' not in value)):
                quot = noquot
            else:
                quot = self._get_single_quote(value)
        else:
            # if value has '\n' or "'" *and* '"', it will need triple quotes
            quot = self._get_triple_quote(value)
        if quot == noquot and '#' in value and self.list_values:
            quot = self._get_single_quote(value)
        return quot % value
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
def _handle_value(self, value):
    """
    Given a value string, unquote, remove comment,
    handle lists. (including empty and single member lists)

    Returns a ``(value, comment)`` tuple where ``value`` is either a
    single (unquoted) string or a list of unquoted strings.
    Raises ``SyntaxError`` for badly constructed or badly quoted values.
    """
    # do we look for lists in values ?
    if not self.list_values:
        # list parsing disabled: match the whole value as-is
        mat = self._nolistvalue.match(value)
        if mat is None:
            raise SyntaxError()
        # NOTE: we don't unquote here
        return mat.groups()
    #
    mat = self._valueexp.match(value)
    if mat is None:
        # the value is badly constructed, probably badly quoted,
        # or an invalid list
        raise SyntaxError()
    # groups produced by the value regex: the comma separated part, a
    # trailing single value, an "empty list" marker and the inline comment
    (list_values, single, empty_list, comment) = mat.groups()
    if (list_values == '') and (single is None):
        # change this if you want to accept empty values
        raise SyntaxError()
    # NOTE: note there is no error handling from here if the regex
    # is wrong: then incorrect values will slip through
    if empty_list is not None:
        # the single comma - meaning an empty list
        return ([], comment)
    if single is not None:
        # handle empty values
        if list_values and not single:
            # FIXME: the '' is a workaround because our regex now matches
            # '' at the end of a list if it has a trailing comma
            single = None
        else:
            single = single or '""'
            single = self._unquote(single)
    if list_values == '':
        # not a list value
        return (single, comment)
    # split the comma separated part into members and unquote each one
    the_list = self._listvalueexp.findall(list_values)
    the_list = [self._unquote(val) for val in the_list]
    if single is not None:
        the_list += [single]
    return (the_list, comment)
def _multiline(self, value, infile, cur_index, maxline):
    """Extract the value, where we are in a multiline situation.

    ``value`` is the remainder of the current line (starting with the
    opening triple quote), ``infile`` is the sequence of input lines,
    ``cur_index`` the index of the current line and ``maxline`` the last
    valid index.

    Returns ``(value, comment, cur_index)`` where ``cur_index`` is the
    index of the line on which the multiline value ended.  Raises
    ``SyntaxError`` for malformed or unterminated values.
    """
    # the opening triple quote is the first three characters
    quot = value[:3]
    newvalue = value[3:]
    # regexes from _triple_quote: [0] matches a value that also closes
    # on this line, [1] matches the closing line of a multiline value
    single_line = self._triple_quote[quot][0]
    multi_line = self._triple_quote[quot][1]
    mat = single_line.match(value)
    if mat is not None:
        # opened and closed on the same line: done
        retval = list(mat.groups())
        retval.append(cur_index)
        return retval
    elif newvalue.find(quot) != -1:
        # somehow the triple quote is missing
        raise SyntaxError()
    #
    # accumulate following lines until one contains the closing quote
    while cur_index < maxline:
        cur_index += 1
        newvalue += '\n'
        line = infile[cur_index]
        if line.find(quot) == -1:
            newvalue += line
        else:
            # end of multiline, process it
            break
    else:
        # we've got to the end of the config, oops...
        raise SyntaxError()
    mat = multi_line.match(line)
    if mat is None:
        # a badly formed line
        raise SyntaxError()
    (value, comment) = mat.groups()
    return (newvalue + value, comment, cur_index)
def _handle_configspec(self, configspec):
    """Parse the configspec and attach it to this ConfigObj.

    ``configspec`` may already be a parsed ``ConfigObj`` or anything the
    ``ConfigObj`` constructor accepts (filename, list of lines, ...).
    """
    # FIXME: Should we check that the configspec was created with the
    # correct settings ? (i.e. ``list_values=False``)
    if not isinstance(configspec, ConfigObj):
        # not yet parsed: parse it now, with list values disabled so
        # that check arguments survive intact
        try:
            configspec = ConfigObj(configspec,
                                   raise_errors=True,
                                   file_error=True,
                                   list_values=False)
        except ConfigObjError, e:
            # FIXME: Should these errors have a reference
            # to the already parsed ConfigObj ?
            raise ConfigspecError('Parsing configspec failed: %s' % e)
        except IOError, e:
            raise IOError('Reading configspec failed: %s' % e)
    # walk the parsed configspec, recursively attaching it to sections
    self._set_configspec_value(configspec, self)
def _set_configspec_value(self, configspec, section):
    """Used to recursively set configspec values."""
    if '__many__' in configspec.sections:
        section.configspec['__many__'] = configspec['__many__']
        if len(configspec.sections) > 1:
            # a __many__ spec cannot coexist with named section specs
            # FIXME: can we supply any useful information here ?
            raise RepeatSectionError()
    if hasattr(configspec, 'initial_comment'):
        # we have a ConfigObj/Section (not a plain dict): carry the
        # top-level formatting info over for use in ``copy`` mode
        section._configspec_initial_comment = configspec.initial_comment
        section._configspec_final_comment = configspec.final_comment
        section._configspec_encoding = configspec.encoding
        section._configspec_BOM = configspec.BOM
        section._configspec_newlines = configspec.newlines
        section._configspec_indent_type = configspec.indent_type
    for entry in configspec.scalars:
        # record comments so validate(copy=True) can reproduce them
        section._configspec_comments[entry] = configspec.comments[entry]
        section._configspec_inline_comments[entry] = configspec.inline_comments[entry]
        section.configspec[entry] = configspec[entry]
        section._order.append(entry)
    for entry in configspec.sections:
        if entry == '__many__':
            continue
        section._cs_section_comments[entry] = configspec.comments[entry]
        section._cs_section_inline_comments[entry] = configspec.inline_comments[entry]
        if not section.has_key(entry):
            # create missing sections as empty ones so they validate
            section[entry] = {}
        self._set_configspec_value(configspec[entry], section[entry])
def _handle_repeat(self, section, configspec):
    """Dynamically assign configspec for repeated section.

    ``configspec`` may be a Section (with ``.sections``/``.scalars``)
    or a plain dict; the AttributeError fallback handles the latter.
    """
    try:
        section_keys = configspec.sections
        scalar_keys = configspec.scalars
    except AttributeError:
        # plain dict configspec: classify entries by value type
        section_keys = [entry for entry in configspec
                        if isinstance(configspec[entry], dict)]
        scalar_keys = [entry for entry in configspec
                       if not isinstance(configspec[entry], dict)]
    if '__many__' in section_keys and len(section_keys) > 1:
        # FIXME: can we supply any useful information here ?
        raise RepeatSectionError()
    # split the spec into per-scalar checks and per-section sub-specs
    scalars = {}
    sections = {}
    for entry in scalar_keys:
        val = configspec[entry]
        scalars[entry] = val
    for entry in section_keys:
        val = configspec[entry]
        if entry == '__many__':
            # keep the repeat marker with the scalars so it propagates
            scalars[entry] = val
            continue
        sections[entry] = val
    section.configspec = scalars
    for entry in sections:
        if not section.has_key(entry):
            section[entry] = {}
        self._handle_repeat(section[entry], sections[entry])
def _write_line(self, indent_string, entry, this_entry, comment):
    """Write an individual ``key = value`` line, for the write method.

    Returns the formatted line as a single string (no trailing newline).
    """
    # NOTE: the calls to self._quote here handles non-StringType values.
    if not self.unrepr:
        val = self._decode_element(self._quote(this_entry))
    else:
        # unrepr mode writes the Python repr of the value verbatim
        val = repr(this_entry)
    return '%s%s%s%s%s' % (indent_string,
                           self._decode_element(self._quote(entry, multiline=False)),
                           self._a_to_u(' = '),
                           val,
                           self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
    """Write a section marker line, e.g. ``[[section]]``.

    ``depth`` controls how many brackets surround the section name.
    """
    return '%s%s%s%s%s' % (indent_string,
                           self._a_to_u('[' * depth),
                           self._quote(self._decode_element(entry), multiline=False),
                           self._a_to_u(']' * depth),
                           self._decode_element(comment))
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
def write(self, outfile=None, section=None):
    """
    Write the current ConfigObj as a file

    tekNico: FIXME: use StringIO instead of real files

    >>> filename = a.filename
    >>> a.filename = 'test.ini'
    >>> a.write()
    >>> a.filename = filename
    >>> a == ConfigObj('test.ini', raise_errors=True)
    1
    """
    if self.indent_type is None:
        # this can be true if initialised from a dictionary
        self.indent_type = DEFAULT_INDENT_TYPE
    out = []
    cs = self._a_to_u('#')
    csp = self._a_to_u('# ')
    if section is None:
        # top-level call: disable interpolation so raw values are written
        int_val = self.interpolation
        self.interpolation = False
        section = self
        for line in self.initial_comment:
            line = self._decode_element(line)
            stripped_line = line.strip()
            if stripped_line and not stripped_line.startswith(cs):
                line = csp + line
            out.append(line)
    indent_string = self.indent_type * section.depth
    for entry in (section.scalars + section.sections):
        if entry in section.defaults:
            # don't write out default values
            continue
        # comment lines attached above the entry
        for comment_line in section.comments[entry]:
            comment_line = self._decode_element(comment_line.lstrip())
            if comment_line and not comment_line.startswith(cs):
                comment_line = csp + comment_line
            out.append(indent_string + comment_line)
        this_entry = section[entry]
        comment = self._handle_comment(section.inline_comments[entry])
        if isinstance(this_entry, dict):
            # a section
            out.append(self._write_marker(
                indent_string,
                this_entry.depth,
                entry,
                comment))
            # recurse: nested sections return their lines rather than writing
            out.extend(self.write(section=this_entry))
        else:
            out.append(self._write_line(
                indent_string,
                entry,
                this_entry,
                comment))
    if section is self:
        for line in self.final_comment:
            line = self._decode_element(line)
            stripped_line = line.strip()
            if stripped_line and not stripped_line.startswith(cs):
                line = csp + line
            out.append(line)
        # restore the interpolation setting disabled above
        self.interpolation = int_val
    if section is not self:
        # recursive call: hand the lines back to the caller
        return out
    if (self.filename is None) and (outfile is None):
        # output a list of lines
        # might need to encode
        # NOTE: This will *screw* UTF16, each line will start with the BOM
        if self.encoding:
            out = [l.encode(self.encoding) for l in out]
        if (self.BOM and ((self.encoding is None) or
                          (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
            # Add the UTF8 BOM
            if not out:
                out.append('')
            out[0] = BOM_UTF8 + out[0]
        return out
    # Turn the list to a string, joined with correct newlines
    newline = self.newlines or os.linesep
    output = self._a_to_u(newline).join(out)
    if self.encoding:
        output = output.encode(self.encoding)
    if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
        # Add the UTF8 BOM
        output = BOM_UTF8 + output
    if not output.endswith(newline):
        output += newline
    if outfile is not None:
        outfile.write(output)
    else:
        # binary mode so the newline characters chosen above survive
        h = open(self.filename, 'wb')
        h.write(output)
        h.close()
def validate(self, validator, preserve_errors=False, copy=False,
             section=None):
    """
    Test the ConfigObj against a configspec.

    It uses the ``validator`` object from *validate.py*.

    To run ``validate`` on the current ConfigObj, call: ::

        test = config.validate(validator)

    (Normally having previously passed in the configspec when the ConfigObj
    was created - you can dynamically assign a dictionary of checks to the
    ``configspec`` attribute of a section though).

    It returns ``True`` if everything passes, or a dictionary of
    pass/fails (True/False). If every member of a subsection passes, it
    will just have the value ``True``. (It also returns ``False`` if all
    members fail).

    In addition, it converts the values from strings to their native
    types if their checks pass (and ``stringify`` is set).

    If ``preserve_errors`` is ``True`` (``False`` is default) then instead
    of a marking a fail with a ``False``, it will preserve the actual
    exception object. This can contain info about the reason for failure.
    For example the ``VdtValueTooSmallError`` indicates that the value
    supplied was too small. If a value (or section) is missing it will
    still be marked as ``False``.

    You must have the validate module to use ``preserve_errors=True``.

    You can then use the ``flatten_errors`` function to turn your nested
    results dictionary into a flattened list of failures - useful for
    displaying meaningful error messages.
    """
    if section is None:
        # top-level call
        if self.configspec is None:
            raise ValueError('No configspec supplied.')
        if preserve_errors:
            # We do this once to remove a top level dependency on the validate module
            # Which makes importing configobj faster
            from validate import VdtMissingValue
            self._vdtMissingValue = VdtMissingValue
        section = self
    #
    spec_section = section.configspec
    if copy and hasattr(section, '_configspec_initial_comment'):
        # copy mode: take formatting attributes from the configspec
        section.initial_comment = section._configspec_initial_comment
        section.final_comment = section._configspec_final_comment
        section.encoding = section._configspec_encoding
        section.BOM = section._configspec_BOM
        section.newlines = section._configspec_newlines
        section.indent_type = section._configspec_indent_type
    if '__many__' in section.configspec:
        many = spec_section['__many__']
        # dynamically assign the configspecs
        # for the sections below
        for entry in section.sections:
            self._handle_repeat(section[entry], many)
    #
    out = {}
    # ret_true: everything so far passed; ret_false: everything so far failed
    ret_true = True
    ret_false = True
    # keep file order first, then any spec-only entries
    order = [k for k in section._order if k in spec_section]
    order += [k for k in spec_section if k not in order]
    for entry in order:
        if entry == '__many__':
            continue
        if (not entry in section.scalars) or (entry in section.defaults):
            # missing entries
            # or entries from defaults
            missing = True
            val = None
            if copy and not entry in section.scalars:
                # copy comments
                section.comments[entry] = (
                    section._configspec_comments.get(entry, []))
                section.inline_comments[entry] = (
                    section._configspec_inline_comments.get(entry, ''))
            #
        else:
            missing = False
            val = section[entry]
        try:
            check = validator.check(spec_section[entry],
                                    val,
                                    missing=missing
                                    )
        except validator.baseErrorClass, e:
            if not preserve_errors or isinstance(e, self._vdtMissingValue):
                out[entry] = False
            else:
                # preserve the error
                out[entry] = e
                ret_false = False
            ret_true = False
        else:
            # check passed: record any default value the validator provides
            try:
                section.default_values.pop(entry, None)
            except AttributeError:
                # For Python 2.2 compatibility
                try:
                    del section.default_values[entry]
                except KeyError:
                    pass
            if hasattr(validator, 'get_default_value'):
                try:
                    section.default_values[entry] = validator.get_default_value(spec_section[entry])
                except KeyError:
                    # No default
                    pass
            ret_false = False
            out[entry] = True
            if self.stringify or missing:
                # if we are doing type conversion
                # or the value is a supplied default
                if not self.stringify:
                    if isinstance(check, (list, tuple)):
                        # preserve lists
                        check = [self._str(item) for item in check]
                    elif missing and check is None:
                        # convert the None from a default to a ''
                        check = ''
                    else:
                        check = self._str(check)
                if (check != val) or missing:
                    section[entry] = check
            if not copy and missing and entry not in section.defaults:
                section.defaults.append(entry)
    # Missing sections will have been created as empty ones when the
    # configspec was read.
    for entry in section.sections:
        # FIXME: this means DEFAULT is not copied in copy mode
        if section is self and entry == 'DEFAULT':
            continue
        if copy:
            section.comments[entry] = section._cs_section_comments[entry]
            section.inline_comments[entry] = (
                section._cs_section_inline_comments[entry])
        check = self.validate(validator, preserve_errors=preserve_errors,
                              copy=copy, section=section[entry])
        out[entry] = check
        if check == False:
            ret_true = False
        elif check == True:
            ret_false = False
        else:
            # a mixed dictionary of results
            ret_true = False
            ret_false = False
    #
    if ret_true:
        return True
    elif ret_false:
        return False
    return out
def reset(self):
    """Clear ConfigObj instance and restore to 'freshly created' state."""
    self.clear()
    self._initialise()
    # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
    # requires an empty dictionary
    self.configspec = None
    # Just to be sure ;-)
    self._original_configspec = None
def reload(self):
    """
    Reload a ConfigObj from file.

    This method raises a ``ReloadError`` if the ConfigObj doesn't have
    a filename attribute pointing to a file.
    """
    if not isinstance(self.filename, StringTypes):
        raise ReloadError()
    filename = self.filename
    # remember the current construction options (the configspec is
    # restored from the original, unparsed form instead)
    current_options = {}
    for entry in OPTION_DEFAULTS:
        if entry == 'configspec':
            continue
        current_options[entry] = getattr(self, entry)
    configspec = self._original_configspec
    current_options['configspec'] = configspec
    # wipe current contents and re-parse the file from scratch
    self.clear()
    self._initialise(current_options)
    self._load(filename, configspec)
class SimpleVal(object):
    """
    A simple validator.

    Checks only that every member named in the configspec is present
    (the configspec values themselves are ignored).  Pass an instance of
    ``SimpleVal`` to the ``validate`` method of your ``ConfigObj``:
    ``validate`` will return ``True`` if all members are present, or a
    dictionary of True/False flags meaning present/missing.  (Whole
    missing sections will be replaced with ``False``.)
    """

    def __init__(self):
        self.baseErrorClass = ConfigObjError

    def check(self, check, member, missing=False):
        """Dummy check: raise when the value is missing, else pass it through."""
        if not missing:
            return member
        raise self.baseErrorClass()
# Check / processing functions for options
def flatten_errors(cfg, res, levels=None, results=None):
    """
    Turn a nested results dictionary, as returned by ``ConfigObj.validate``,
    into a flat list of failures.

    ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
    dictionary returned by ``validate``.

    Returns a list of ``([list of sections...], key, result)`` tuples, one
    per failing entry.  If a whole section was missing, ``key`` is ``None``.
    ``result`` is ``False`` for a missing value or section; when ``validate``
    was called with ``preserve_errors=True`` it may instead be the exception
    object describing the failure (usable as a human-readable message, e.g.
    *The value "3" is of the wrong type*).

    (This function is recursive; the ``levels`` and ``results`` arguments
    are internal and should not be supplied by callers.)
    """
    if levels is None:
        # outermost call: start with fresh accumulators
        levels = []
        results = []
    if res is True:
        # everything below this point passed
        return results
    if res is False:
        # the entire section is missing
        results.append((levels[:], None, False))
        if levels:
            levels.pop()
        return results
    for key, val in res.items():
        if val == True:
            continue
        child_cfg = cfg.get(key)
        if isinstance(child_cfg, dict):
            # a subsection: descend one level
            levels.append(key)
            flatten_errors(child_cfg, val, levels, results)
        else:
            results.append((levels[:], key, val))
    # finished this section: climb back up one level
    if levels:
        levels.pop()
    return results
"""*A programming language is a medium of expression.* - Paul Graham""" | PypiClean |
/NDBD_distributions-0.1.tar.gz/NDBD_distributions-0.1/NDBD_distributions/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
    """ Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats to be extracted from the data file
        p (float) representing the probability of an event occurring
        n (int) number of trials
    """

    def __init__(self, prob=.5, size=20):
        self.n = size
        self.p = prob
        # initialise the base class with the derived mean / stdev
        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())

    def calculate_mean(self):
        """Calculate the mean (n * p) and store it on the instance.

        Args:
            None

        Returns:
            float: mean of the distribution
        """
        self.mean = self.p * self.n
        return self.mean

    def calculate_stdev(self):
        """Calculate the standard deviation sqrt(n * p * (1 - p)) and
        store it on the instance.

        Args:
            None

        Returns:
            float: standard deviation of the distribution
        """
        self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
        return self.stdev

    def replace_stats_with_data(self):
        """Estimate p and n from the data set (``self.data``, assumed to be
        a list of 0/1 outcomes) and refresh mean and stdev accordingly.

        Args:
            None

        Returns:
            None: n, p, mean and stdev are updated in place
        """
        self.n = len(self.data)
        # fraction of successes; 1.0 * forces float division
        self.p = 1.0 * sum(self.data) / len(self.data)
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()

    def plot_bar(self):
        """Output a bar chart of the expected outcome counts using the
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
        plt.title('Bar Chart of Data')
        plt.xlabel('outcome')
        plt.ylabel('count')

    def pdf(self, k):
        """Probability mass function calculator for the binomial distribution.

        Args:
            k (int): number of successes to evaluate the probability for

        Returns:
            float: probability of observing exactly k successes in n trials
        """
        # binomial coefficient C(n, k)
        a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
        # p^k * (1-p)^(n-k)
        b = (self.p ** k) * (1 - self.p) ** (self.n - k)
        return a * b

    def plot_bar_pdf(self):
        """Plot the probability mass function of the binomial distribution
        over all possible outcomes 0..n.

        Args:
            None

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        x = []
        y = []

        # calculate the x values to visualize
        for i in range(self.n + 1):
            x.append(i)
            y.append(self.pdf(i))

        # make the plots
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()

        return x, y

    def __add__(self, other):
        """Add together two Binomial distributions with equal p.

        Args:
            other (Binomial): Binomial instance

        Returns:
            Binomial: Binomial distribution with n = self.n + other.n

        Raises:
            AssertionError: if the two distributions have different p values
        """
        # the sum of two binomials is binomial only when p is shared
        assert self.p == other.p, 'p values are not equal'

        result = Binomial()
        result.n = self.n + other.n
        result.p = self.p
        result.calculate_mean()
        result.calculate_stdev()

        return result

    def __repr__(self):
        """Output the characteristics of the Binomial instance.

        Args:
            None

        Returns:
            string: characteristics of the Binomial
        """
        return "mean {}, standard deviation {}, p {}, n {}".\
        format(self.mean, self.stdev, self.p, self.n)
/MXFusion-0.3.1.tar.gz/MXFusion-0.3.1/mxfusion/components/distributions/laplace.py | from ...common.config import get_default_MXNet_mode
from ..variables import Variable
from .univariate import UnivariateDistribution
class Laplace(UnivariateDistribution):
    """
    One-dimensional Laplace distribution.

    May describe a scalar random variable or an array of random variables;
    for arrays, ``location`` and ``scale`` are broadcast to the shape of
    the output random variable (array).

    :param location: Location of the Laplace distribution.
    :type location: Variable
    :param scale: Scale of the Laplace distribution.
    :type scale: Variable
    :param rand_gen: the random generator (default: MXNetRandomGenerator).
    :type rand_gen: RandomGenerator
    :param dtype: the data type for float point numbers.
    :type dtype: numpy.float32 or numpy.float64
    :param ctx: the mxnet context (default: None/current context).
    :type ctx: None or mxnet.cpu or mxnet.gpu
    """
    def __init__(self, location, scale, rand_gen=None, dtype=None, ctx=None):
        # wrap raw values so that both parameters are model Variables
        if not isinstance(location, Variable):
            location = Variable(value=location)
        if not isinstance(scale, Variable):
            scale = Variable(value=scale)
        inputs = [('location', location), ('scale', scale)]
        super(Laplace, self).__init__(
            inputs=inputs, outputs=None,
            input_names=['location', 'scale'],
            output_names=['random_variable'],
            rand_gen=rand_gen, dtype=dtype, ctx=ctx)

    def log_pdf_impl(self, location, scale, random_variable, F=None):
        """
        Compute the log of the probability density function (PDF) of the
        Laplace distribution: ``-log(2 * scale) - |x - location| / scale``.

        :param location: the location of the Laplace distribution.
        :type location: MXNet NDArray or MXNet Symbol
        :param scale: the scale of the Laplace distributions.
        :type scale: MXNet NDArray or MXNet Symbol
        :param random_variable: the random variable of the Laplace distribution.
        :type random_variable: MXNet NDArray or MXNet Symbol
        :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
        :returns: log pdf of the distribution.
        :rtypes: MXNet NDArray or MXNet Symbol
        """
        F = get_default_MXNet_mode() if F is None else F
        # normalisation term: -log(2 * scale)
        log_norm = -F.log(2 * scale)
        # absolute deviation from the location, scaled
        abs_dev = F.abs(F.broadcast_minus(random_variable, location))
        return F.broadcast_minus(
            log_norm, F.broadcast_div(abs_dev, scale)) * self.log_pdf_scaling

    def draw_samples_impl(self, location, scale, rv_shape, num_samples=1, F=None):
        """
        Draw samples from the Laplace distribution.

        :param location: the location of the Laplace distribution.
        :type location: MXNet NDArray or MXNet Symbol
        :param scale: the scale of the Laplace distributions.
        :type scale: MXNet NDArray or MXNet Symbol
        :param rv_shape: the shape of each sample.
        :type rv_shape: tuple
        :param num_samples: the number of drawn samples (default: one).
        :type num_samples: int
        :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
        :returns: a set samples of the Laplace distribution.
        :rtypes: MXNet NDArray or MXNet Symbol
        """
        F = get_default_MXNet_mode() if F is None else F
        sample_shape = (num_samples,) + rv_shape
        # draw standard Laplace noise, then shift and scale it
        raw = self._rand_gen.sample_laplace(
            shape=sample_shape, dtype=self.dtype, ctx=self.ctx)
        return F.broadcast_add(F.broadcast_mul(raw, scale), location)

    @staticmethod
    def define_variable(location=0., scale=1., shape=None, rand_gen=None, dtype=None, ctx=None):
        """
        Create and return a random variable drawn from a Laplace distribution.

        :param location: Location of the distribution.
        :param scale: Scale of the distribution.
        :param shape: the shape of the random variable(s).
        :type shape: tuple or [tuple]
        :param rand_gen: the random generator (default: MXNetRandomGenerator).
        :type rand_gen: RandomGenerator
        :param dtype: the data type for float point numbers.
        :type dtype: numpy.float32 or numpy.float64
        :param ctx: the mxnet context (default: None/current context).
        :type ctx: None or mxnet.cpu or mxnet.gpu
        :returns: the random variables drawn from the Laplace distribution.
        :rtypes: Variable
        """
        dist = Laplace(location=location, scale=scale, rand_gen=rand_gen,
                       dtype=dtype, ctx=ctx)
        dist._generate_outputs(shape=shape)
        return dist.random_variable
/BrainLes_HD_BET-0.0.3-py3-none-any.whl/brainles_hd_bet/network_architecture.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from brainles_hd_bet.utils import softmax_helper
class EncodingModule(nn.Module):
    """Pre-activation residual encoder block: IN -> LReLU -> Conv3d ->
    (Dropout3d) -> IN -> LReLU -> Conv3d, plus an identity skip connection.

    NOTE: the residual addition in ``forward`` requires
    ``in_channels == out_channels`` (the surrounding Network always
    instantiates it that way).
    """
    def __init__(self, in_channels, out_channels, filter_size=3, dropout_p=0.3, leakiness=1e-2, conv_bias=True,
                 inst_norm_affine=True, lrelu_inplace=True):
        nn.Module.__init__(self)
        self.dropout_p = dropout_p
        self.lrelu_inplace = lrelu_inplace
        self.inst_norm_affine = inst_norm_affine
        self.conv_bias = conv_bias
        self.leakiness = leakiness
        self.bn_1 = nn.InstanceNorm3d(in_channels, affine=self.inst_norm_affine, track_running_stats=True)
        self.conv1 = nn.Conv3d(in_channels, out_channels, filter_size, 1, (filter_size - 1) // 2, bias=self.conv_bias)
        # FIX: forward() explicitly supports dropout_p=None, but
        # nn.Dropout3d(None) raises at construction time - only build the
        # layer when a probability was actually given.
        self.dropout = nn.Dropout3d(dropout_p) if dropout_p is not None else None
        # FIX: bn_2 normalizes the output of conv1, which has out_channels
        # feature maps (the original used in_channels, which only worked
        # because the network always uses in_channels == out_channels).
        self.bn_2 = nn.InstanceNorm3d(out_channels, affine=self.inst_norm_affine, track_running_stats=True)
        self.conv2 = nn.Conv3d(out_channels, out_channels, filter_size, 1, (filter_size - 1) // 2, bias=self.conv_bias)

    def forward(self, x):
        skip = x  # identity branch of the residual connection
        x = F.leaky_relu(self.bn_1(x), negative_slope=self.leakiness, inplace=self.lrelu_inplace)
        x = self.conv1(x)
        if self.dropout_p is not None and self.dropout_p > 0:
            x = self.dropout(x)
        x = F.leaky_relu(self.bn_2(x), negative_slope=self.leakiness, inplace=self.lrelu_inplace)
        x = self.conv2(x)
        x = x + skip
        return x
class Upsample(nn.Module):
    """Thin module wrapper around ``nn.functional.interpolate``.

    NOTE(review): the default ``align_corners=True`` is only valid for the
    linear interpolation modes; combining it with the default
    ``mode='nearest'`` makes ``interpolate`` raise - callers pass a linear
    mode (e.g. 'trilinear').
    """
    def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=True):
        super(Upsample, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        return F.interpolate(
            x, size=self.size, scale_factor=self.scale_factor,
            mode=self.mode, align_corners=self.align_corners)
class LocalizationModule(nn.Module):
    """Decoder localization block: 3x3x3 conv keeping the channel count,
    then a 1x1x1 conv projecting to ``out_channels``; both followed by
    InstanceNorm + LeakyReLU.
    """
    def __init__(self, in_channels, out_channels, leakiness=1e-2, conv_bias=True, inst_norm_affine=True,
                 lrelu_inplace=True):
        nn.Module.__init__(self)
        self.lrelu_inplace = lrelu_inplace
        self.inst_norm_affine = inst_norm_affine
        self.conv_bias = conv_bias
        self.leakiness = leakiness
        # 3x3x3 conv, channel count unchanged
        self.conv1 = nn.Conv3d(in_channels, in_channels, 3, 1, 1, bias=self.conv_bias)
        self.bn_1 = nn.InstanceNorm3d(in_channels, affine=self.inst_norm_affine, track_running_stats=True)
        # 1x1x1 projection to the target channel count
        self.conv2 = nn.Conv3d(in_channels, out_channels, 1, 1, 0, bias=self.conv_bias)
        self.bn_2 = nn.InstanceNorm3d(out_channels, affine=self.inst_norm_affine, track_running_stats=True)

    def forward(self, x):
        h = self.conv1(x)
        h = F.leaky_relu(self.bn_1(h), negative_slope=self.leakiness, inplace=self.lrelu_inplace)
        h = self.conv2(h)
        return F.leaky_relu(self.bn_2(h), negative_slope=self.leakiness, inplace=self.lrelu_inplace)
class UpsamplingModule(nn.Module):
def __init__(self, in_channels, out_channels, leakiness=1e-2, conv_bias=True, inst_norm_affine=True,
lrelu_inplace=True):
nn.Module.__init__(self)
self.lrelu_inplace = lrelu_inplace
self.inst_norm_affine = inst_norm_affine
self.conv_bias = conv_bias
self.leakiness = leakiness
self.upsample = Upsample(scale_factor=2, mode="trilinear", align_corners=True)
self.upsample_conv = nn.Conv3d(in_channels, out_channels, 3, 1, 1, bias=self.conv_bias)
self.bn = nn.InstanceNorm3d(out_channels, affine=self.inst_norm_affine, track_running_stats=True)
def forward(self, x):
x = F.leaky_relu(self.bn(self.upsample_conv(self.upsample(x))), negative_slope=self.leakiness,
inplace=self.lrelu_inplace)
return x
class DownsamplingModule(nn.Module):
    """Encoder downsampling block: InstanceNorm + LeakyReLU, then a
    stride-2 3x3x3 conv that halves the spatial resolution.

    ``forward`` returns ``(pre_downsample, downsampled)`` so the
    full-resolution activation can be used as a skip connection.
    """
    def __init__(self, in_channels, out_channels, leakiness=1e-2, conv_bias=True, inst_norm_affine=True,
                 lrelu_inplace=True):
        nn.Module.__init__(self)
        self.lrelu_inplace = lrelu_inplace
        self.inst_norm_affine = inst_norm_affine
        self.conv_bias = conv_bias
        self.leakiness = leakiness
        self.bn = nn.InstanceNorm3d(in_channels, affine=self.inst_norm_affine, track_running_stats=True)
        self.downsample = nn.Conv3d(in_channels, out_channels, 3, 2, 1, bias=self.conv_bias)

    def forward(self, x):
        pre = F.leaky_relu(self.bn(x), negative_slope=self.leakiness, inplace=self.lrelu_inplace)
        down = self.downsample(pre)
        return pre, down
class Network(nn.Module):
def __init__(self, num_classes=4, num_input_channels=4, base_filters=16, dropout_p=0.3,
final_nonlin=softmax_helper, leakiness=1e-2, conv_bias=True, inst_norm_affine=True,
lrelu_inplace=True, do_ds=True):
super(Network, self).__init__()
self.do_ds = do_ds
self.lrelu_inplace = lrelu_inplace
self.inst_norm_affine = inst_norm_affine
self.conv_bias = conv_bias
self.leakiness = leakiness
self.final_nonlin = final_nonlin
self.init_conv = nn.Conv3d(num_input_channels, base_filters, 3, 1, 1, bias=self.conv_bias)
self.context1 = EncodingModule(base_filters, base_filters, 3, dropout_p, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.down1 = DownsamplingModule(base_filters, base_filters * 2, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.context2 = EncodingModule(2 * base_filters, 2 * base_filters, 3, dropout_p, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.down2 = DownsamplingModule(2 * base_filters, base_filters * 4, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.context3 = EncodingModule(4 * base_filters, 4 * base_filters, 3, dropout_p, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.down3 = DownsamplingModule(4 * base_filters, base_filters * 8, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.context4 = EncodingModule(8 * base_filters, 8 * base_filters, 3, dropout_p, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.down4 = DownsamplingModule(8 * base_filters, base_filters * 16, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.context5 = EncodingModule(16 * base_filters, 16 * base_filters, 3, dropout_p, leakiness=1e-2,
conv_bias=True, inst_norm_affine=True, lrelu_inplace=True)
self.bn_after_context5 = nn.InstanceNorm3d(16 * base_filters, affine=self.inst_norm_affine, track_running_stats=True)
self.up1 = UpsamplingModule(16 * base_filters, 8 * base_filters, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.loc1 = LocalizationModule(16 * base_filters, 8 * base_filters, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.up2 = UpsamplingModule(8 * base_filters, 4 * base_filters, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.loc2 = LocalizationModule(8 * base_filters, 4 * base_filters, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.loc2_seg = nn.Conv3d(4 * base_filters, num_classes, 1, 1, 0, bias=False)
self.up3 = UpsamplingModule(4 * base_filters, 2 * base_filters, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.loc3 = LocalizationModule(4 * base_filters, 2 * base_filters, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.loc3_seg = nn.Conv3d(2 * base_filters, num_classes, 1, 1, 0, bias=False)
self.up4 = UpsamplingModule(2 * base_filters, 1 * base_filters, leakiness=1e-2, conv_bias=True,
inst_norm_affine=True, lrelu_inplace=True)
self.end_conv_1 = nn.Conv3d(2 * base_filters, 2 * base_filters, 3, 1, 1, bias=self.conv_bias)
self.end_conv_1_bn = nn.InstanceNorm3d(2 * base_filters, affine=self.inst_norm_affine, track_running_stats=True)
self.end_conv_2 = nn.Conv3d(2 * base_filters, 2 * base_filters, 3, 1, 1, bias=self.conv_bias)
self.end_conv_2_bn = nn.InstanceNorm3d(2 * base_filters, affine=self.inst_norm_affine, track_running_stats=True)
self.seg_layer = nn.Conv3d(2 * base_filters, num_classes, 1, 1, 0, bias=False)
    def forward(self, x):
        """Encoder-decoder forward pass with deep supervision.

        Each ``down*`` module returns ``(skip, x)`` (see below), so four
        skip tensors are stashed for the decoder; ``torch.cat(..., dim=1)``
        merges skip and upsampled features along dim 1 (the channel dim
        for Conv3d inputs).  Returns the final segmentation map, or --
        when ``self.do_ds`` is truthy -- the list of maps reversed so the
        full-resolution output comes first.
        NOTE(review): assumes input spatial dims survive four downsampling
        stages (i.e. divisible by 16) -- confirm against callers.
        """
        seg_outputs = []
        x = self.init_conv(x)
        x = self.context1(x)
        skip1, x = self.down1(x)
        x = self.context2(x)
        skip2, x = self.down2(x)
        x = self.context3(x)
        skip3, x = self.down3(x)
        x = self.context4(x)
        skip4, x = self.down4(x)
        x = self.context5(x)
        # bottleneck normalisation + activation before decoding
        x = F.leaky_relu(self.bn_after_context5(x), negative_slope=self.leakiness, inplace=self.lrelu_inplace)
        x = self.up1(x)
        x = torch.cat((skip4, x), dim=1)
        x = self.loc1(x)
        x = self.up2(x)
        x = torch.cat((skip3, x), dim=1)
        x = self.loc2(x)
        # first (coarsest) deep-supervision head
        loc2_seg = self.final_nonlin(self.loc2_seg(x))
        seg_outputs.append(loc2_seg)
        x = self.up3(x)
        x = torch.cat((skip2, x), dim=1)
        x = self.loc3(x)
        # second deep-supervision head
        loc3_seg = self.final_nonlin(self.loc3_seg(x))
        seg_outputs.append(loc3_seg)
        x = self.up4(x)
        x = torch.cat((skip1, x), dim=1)
        x = F.leaky_relu(self.end_conv_1_bn(self.end_conv_1(x)), negative_slope=self.leakiness,
                         inplace=self.lrelu_inplace)
        x = F.leaky_relu(self.end_conv_2_bn(self.end_conv_2(x)), negative_slope=self.leakiness,
                         inplace=self.lrelu_inplace)
        # full-resolution segmentation head
        x = self.final_nonlin(self.seg_layer(x))
        seg_outputs.append(x)
        if self.do_ds:
            return seg_outputs[::-1]
        else:
            return seg_outputs[-1] | PypiClean
/MSWinPrint-1.1.tar.gz/MSWinPrint-1.1/MSWinPrint.py | # "constants" for use with printer setup calls
# Device-capability indices passed to GetDeviceCaps (match the Win32
# GetDeviceCaps constants of the same names -- TODO confirm against wingdi.h).
HORZRES = 8
VERTRES = 10
LOGPIXELSX = 88
LOGPIXELSY = 90
PHYSICALWIDTH = 110
PHYSICALHEIGHT = 111
import win32gui, win32ui, win32print, win32con
try:
    from PIL import ImageWin
except:
    ImageWin = None
# device units per "point" used by the drawing helpers (TWIPS mapping: 20/pt)
scale_factor = 20
# lazy cache of installed printers, filled by build_dict()
prdict = None
# paper name -> numeric paper-size code (presumably DMPAPER_* values; verify)
paper_sizes = {
    "letter": 1,
    "lettersmall": 2,
    "tabloid": 3,
    "ledger": 4,
    "legal": 5,
    "statement": 6,
    "executive": 7,
    "a3": 8,
    "a4": 9,
    "envelope9": 19,
    "envelope10": 20,
    "envelope11": 21,
    "envelope12": 22,
    "envelope14": 23,
    "fanfold": 39,
}
# orientation name -> DEVMODE Orientation code
orientations = {
    "portrait": 1,
    "landscape": 2,
}
# duplex mode name -> DEVMODE Duplex code ("normal" and "none" are aliases)
duplexes = {
    "normal": 1,
    "none": 1,
    "long": 2,
    "short": 3,
}
class document:
    """A Windows print job with a simple point-based drawing interface.

    Coordinates are given in points (1/72 inch); y grows downwards and is
    negated internally because the DC uses MM_TWIPS mapping.  The first
    drawing call implicitly opens the document (see begin_document).
    Python 2 / pywin32 only.
    """
    def __init__(self, printer = None, papersize = None, orientation = None, duplex = None):
        # printer defaults to the system default printer at begin_document time
        self.dc = None
        self.font = None
        self.printer = printer
        self.papersize = papersize
        self.orientation = orientation
        self.page = 0  # 0 means "document not started yet"
        self.duplex = duplex
    def scalepos(self, pos):
        """Scale a flat (x, y, x, y, ...) tuple to device units, negating y."""
        rc = []
        for i in range(len(pos)):
            p = pos[i]
            if i % 2:
                p *= -1
            rc.append(int(p * scale_factor))
        return tuple(rc)
    def begin_document(self, desc = "MSWinPrint.py print job"):
        """Open the printer, apply paper/orientation/duplex, start the doc."""
        # open the printer
        if self.printer is None:
            self.printer = win32print.GetDefaultPrinter()
        self.hprinter = win32print.OpenPrinter(self.printer)
        # load default settings (level-8 PRINTER_INFO carries pDevMode)
        devmode = win32print.GetPrinter(self.hprinter, 8)["pDevMode"]
        # change paper size and orientation
        if self.papersize is not None:
            if type(self.papersize) is int:
                devmode.PaperSize = self.papersize
            else:
                devmode.PaperSize = paper_sizes[self.papersize]
        if self.orientation is not None:
            devmode.Orientation = orientations[self.orientation]
        if self.duplex is not None:
            devmode.Duplex = duplexes[self.duplex]
        # create dc using new settings
        self.hdc = win32gui.CreateDC("WINSPOOL", self.printer, devmode)
        self.dc = win32ui.CreateDCFromHandle(self.hdc)
        # self.dc = win32ui.CreateDC()
        # if self.printer is not None:
        #     self.dc.CreatePrinterDC(self.printer)
        # else:
        #     self.dc.CreatePrinterDC()
        self.dc.SetMapMode(win32con.MM_TWIPS) # twips: 1/20 point = 1/1440 inch
        self.dc.StartDoc(desc)
        self.pen = win32ui.CreatePen(0, int(scale_factor), 0L)
        self.dc.SelectObject(self.pen)
        self.page = 1
    def end_document(self):
        """Finish the job and release the DC; safe to call if never started."""
        if self.page == 0:
            return # document was never started
        self.dc.EndDoc()
        del self.dc
    def end_page(self):
        """Eject the current page and advance the page counter."""
        if self.page == 0:
            return # nothing on the page
        self.dc.EndPage()
        self.page += 1
    def getsize(self):
        """Return the printable (width, height) of the page in points."""
        if self.page == 0:
            self.begin_document()
        # returns printable (width, height) in points
        width = float(self.dc.GetDeviceCaps(HORZRES)) * (72.0 / self.dc.GetDeviceCaps(LOGPIXELSX))
        height = float(self.dc.GetDeviceCaps(VERTRES)) * (72.0 / self.dc.GetDeviceCaps(LOGPIXELSY))
        return width, height
    def line(self, from_, to):
        """Draw a line between two (x, y) point tuples."""
        if self.page == 0:
            self.begin_document()
        self.dc.MoveTo(self.scalepos(from_))
        self.dc.LineTo(self.scalepos(to))
    def rectangle(self, box):
        """Outline the rectangle given as (left, top, right, bottom)."""
        if self.page == 0:
            self.begin_document()
        self.dc.MoveTo(self.scalepos((box[0], box[1])))
        self.dc.LineTo(self.scalepos((box[2], box[1])))
        self.dc.LineTo(self.scalepos((box[2], box[3])))
        self.dc.LineTo(self.scalepos((box[0], box[3])))
        self.dc.LineTo(self.scalepos((box[0], box[1])))
    def text(self, position, text):
        """Draw `text` with its origin at `position` using the current font."""
        if self.page == 0:
            self.begin_document()
        self.dc.TextOut(scale_factor * position[0],
                        -1 * scale_factor * position[1], text)
    def setfont(self, name, size, bold = None):
        """Select a font by face `name` and `size` in points; 700 weight if bold."""
        if self.page == 0:
            self.begin_document()
        wt = 400
        if bold:
            wt = 700
        self.font = getfont(name, size, wt)
        self.dc.SelectObject(self.font)
    def image(self, position, image, size):
        """Print a PIL image at `position` with the given (width, height)."""
        if ImageWin is None:
            raise NotImplementedError, "PIL required for image method"
        if self.page == 0:
            self.begin_document()
        dib = ImageWin.Dib(image)
        endpos = (position[0] + size[0], position[1] + size[1])
        # destination rect in device units; y values negated for MM_TWIPS
        dest = (position[0] * scale_factor,
                -1 * position[1] * scale_factor,
                endpos[0] * scale_factor,
                -1 * endpos[1] * scale_factor)
        dib.draw(self.hdc, dest)
    def setink(self, ink):
        # no-op; presumably kept for drawing-interface compatibility -- confirm
        pass
    def setfill(self, onoff):
        # no-op; presumably kept for drawing-interface compatibility -- confirm
        pass
def build_dict():
    """Populate the module-level `prdict` cache with installed printers.

    Each entry maps a printer name to its flags/description/comment as
    reported by EnumPrinters (local + connected printers).
    """
    global prdict
    printers = win32print.EnumPrinters(
        win32print.PRINTER_ENUM_CONNECTIONS
        + win32print.PRINTER_ENUM_LOCAL)
    prdict = {}
    for flags, description, name, comment in printers:
        prdict[name] = {
            "flags": flags,
            "description": description,
            "comment": comment,
        }
def listprinters():
    """Return printer names: the default printer first, the rest sorted."""
    default = win32print.GetDefaultPrinter()
    if prdict is None:
        build_dict()
    names = [default]
    for name in sorted(prdict.keys()):
        if name != default:
            names.append(name)
    return names
def desc(name):
    """Return the cached info dict for printer `name`, building the cache
    on first use.

    Fix: compare against None with `is` (identity) rather than `==`,
    which can be fooled by objects overriding equality.
    """
    if prdict is None:
        listprinters()
    return prdict[name]
def getfont(name, size, weight = 400):
    """Create a win32ui font: `size` points scaled to document units."""
    spec = {
        "name": name,
        "height": scale_factor * size,
        "weight": weight,
    }
    return win32ui.CreateFont(spec)
if __name__ == "__main__":
doc = document(orientation = "landscape")
doc.begin_document()
doc.setfont("Arial", 32)
doc.text((72, 72), "Testing...")
doc.text((72, 72+48), "Testing #2")
doc.rectangle((72, 72, 72*6, 72*3))
doc.line((72, 72), (72*6, 72*3))
doc.end_document()
# end of file. | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/Asana-Math/Fraktur/Regular/Main.js | MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS.AsanaMathJax_Fraktur={directory:"Fraktur/Regular",family:"AsanaMathJax_Fraktur",testString:"\u210C\u2128\u212D\uD835\uDD04\uD835\uDD05\uD835\uDD07\uD835\uDD08\uD835\uDD09\uD835\uDD0A\uD835\uDD0D\uD835\uDD0E\uD835\uDD0F\uD835\uDD10\uD835\uDD11\uD835\uDD12",32:[0,0,249,0,0],8460:[719,166,697,29,657],8488:[709,171,697,-7,608],8493:[719,4,645,53,629],120068:[721,4,697,20,675],120069:[720,7,801,60,747],120071:[708,4,801,69,746],120072:[719,4,645,54,629],120073:[715,157,697,74,663],120074:[721,4,801,88,740],120077:[719,162,645,-1,586],120078:[716,4,697,2,659],120079:[719,4,645,37,603],120080:[714,4,957,11,936],120081:[716,6,748,16,716],120082:[707,4,801,42,754],120083:[721,163,801,37,715],120084:[706,4,801,41,800],120086:[706,4,801,103,757],120087:[707,4,697,42,688],120088:[720,4,697,49,683],120089:[714,4,801,48,705],120090:[713,-2,957,25,931],120091:[719,4,645,29,629],120092:[719,165,748,19,641],120094:[504,6,478,67,469],120095:[683,9,478,23,436],120096:[500,4,374,85,356],120097:[696,4,478,54,447],120098:[503,5,426,78,392],120099:[719,162,322,27,293],120100:[505,163,478,54,443],120101:[696,165,478,25,438],120102:[703,4,270,32,258],120103:[705,169,270,32,229],120104:[702,4,322,21,308],120105:[696,5,270,42,265],120106:[499,4,801,24,774],120107:[499,4,530,16,518],120108:[502,4,478,69,447],120109:[505,161,530,68,496],120110:[499,168,478,66,455],120111:[504,4,374,17,362],120112:[500,6,426,56,409],120113:[696,6,322,19,293],120114:[501,4,530,25,513],120115:[496,4,478,28,434],120116:[501,4,748,46,708],120117:[503,4,426,31,402],120118:[505,163,530,36,465],120119:[505,165,374,39,344],120172:[719,9,748,54,726],120173:[715,7,748,52,723],120174:[718,8,697,77,667],120175:[715,8,697,51,668],120176:[719,8,697,63,684],120177:[719,167,645,37,633],120178:[718,9,801,76,756],120179:[718,167,748,33,709],120180:[7
18,11,645,29,611],120181:[719,167,645,16,609],120182:[718,14,748,14,732],120183:[718,11,593,32,556],120184:[719,15,968,16,952],120185:[719,11,801,53,785],120186:[718,7,697,69,681],120187:[719,167,748,47,749],120188:[717,11,759,52,748],120189:[719,11,801,49,782],120190:[719,5,697,62,672],120191:[716,8,645,71,632],120192:[718,12,697,32,676],120193:[718,9,748,43,746],120194:[713,4,968,38,968],120195:[718,6,645,32,642],120196:[718,167,748,49,705],120197:[717,167,655,20,601],120198:[537,9,499,63,489],120199:[709,17,520,43,472],120200:[540,7,364,61,354],120201:[717,8,530,52,481],120202:[541,11,416,49,411],120203:[718,166,374,43,348],120204:[536,167,478,43,466],120205:[718,166,520,37,474],120206:[719,11,312,22,302],120207:[718,168,322,35,289],120208:[718,8,374,52,345],120209:[716,9,312,52,304],120210:[537,9,822,27,800],120211:[539,7,541,2,542],120212:[549,8,478,40,455],120213:[544,167,551,36,505],120214:[549,167,488,54,458],120215:[545,8,416,41,414],120216:[542,4,468,60,429],120217:[704,11,322,23,317],120218:[543,11,530,24,529],120219:[536,4,520,28,477],120220:[546,6,748,32,709],120221:[537,8,426,21,417],120222:[536,166,478,25,447],120223:[541,168,374,36,345]};MathJax.Callback.Queue(["initFont",MathJax.OutputJax["HTML-CSS"],"AsanaMathJax_Fraktur"],["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Fraktur/Regular/Main.js"]); | PypiClean |
/HinteDI-0.3.2.tar.gz/HinteDI-0.3.2/README.md | # HinteDI - simple dependency injection system with type hints
This package is a small utility I have been using in my own projects: a super simple tool for basic dependency
injection in Python, based on type hints and decorators.
This package exposes three classes: HinteDI, InjectionException and InstanceSentinel. Import HinteDI and use
```@HinteDI.singleton``` and ```@HinteDI.instance``` to create dependencies and ```@HinteDI.inject``` to mark a
function as requiring dependency injection. Type hint the dependencies in the injected function and HinteDI will
inject the dependencies for you. You can also use ```@HinteDI.abstract_base```, ```@HinteDI.singleton_implementation```
and ```@HinteDI.instance_implementation``` to create factory-style abstract dependencies resolvable with a set key.
See the docs at [GitHub pages](https://eddiethecubehead.github.io/HinteDI/) for better documentation about the package.
| PypiClean |
/BigJob-0.64.5.tar.gz/BigJob-0.64.5/examples/tutorial/chained_ensembles.py | import os
import sys
import pilot
import traceback
""" DESCRIPTION: Tutorial 2: Chaining Tasks
Note: User must edit PILOT SETUP and TASK DESCRIPTION 1-2 sections
This example will not run if these values are not set.
"""
# ---------------- BEGIN REQUIRED PILOT SETUP -----------------
# Distributed Coordination Service - Redis server and password
REDIS_PWD = ""# Fill in the password to your redis server
REDIS_URL = "redis://%s@localhost:6379" % REDIS_PWD
# Resource Information
HOSTNAME = ""# Remote Resource URL
USER_NAME = ""# Username on the remote resource
SAGA_ADAPTOR = ""# Name of the SAGA adaptor, e.g. fork, sge, pbs, slurm, etc.
# NOTE: See complete list of BigJob supported SAGA adaptors at:
# http://saga-project.github.io/BigJob/sphinxdoc/tutorial/table.html
# Fill in queue and allocation for the given resource
# Note: Set fields to "None" if not applicable
QUEUE = ""# Add queue you want to use
PROJECT = ""# Add project / allocation / account to charge
WALLTIME = ""# Maximum Runtime (minutes) for the Pilot Job
WORKDIR = "" # Path of Resource Working Directory
# This is the directory where BigJob will store its output and error files
SPMD_VARIATION = ""# Specify the WAYNESS of SGE clusters ONLY, valid input '12way' for example.
PROCESSES_PER_NODE = ""# Valid on PBS clusters ONLY - this is the number of processors per node. One processor core is treated as one processor on PBS; e.g. a node with 8 cores has a maximum ppn=8
PILOT_SIZE = ""# Number of cores required for the Pilot-Job
# Job Information
NUMBER_JOBS = ""# The TOTAL number of tasks to run
# Continue to USER DEFINED TASK DESCRIPTION to add
# the required information about the individual tasks.
# ---------------- END REQUIRED PILOT SETUP -----------------
#
def main():
    """Run the tutorial: submit NUMBER_JOBS 'A' tasks to a pilot, then chain
    one 'B' task after each 'A' task completes.  Returns 0 on success,
    -1 on error; the pilot is always shut down in the `finally` block.
    Python 2 only (print statements, `except Exception, ex`)."""
    try:
        # this describes the parameters and requirements for our pilot job
        pilot_description = pilot.PilotComputeDescription()
        pilot_description.service_url = "%s://%s@%s" % (SAGA_ADAPTOR,USER_NAME,HOSTNAME)
        pilot_description.queue = QUEUE
        pilot_description.project = PROJECT
        pilot_description.number_of_processes = PILOT_SIZE
        pilot_description.working_directory = WORKDIR
        pilot_description.walltime = WALLTIME
        pilot_description.processes_per_node = PROCESSES_PER_NODE
        pilot_description.spmd_variation = SPMD_VARIATION
        # create a new pilot job
        pilot_compute_service = pilot.PilotComputeService(REDIS_URL)
        pilotjob = pilot_compute_service.create_pilot(pilot_description)
        # submit 'A' tasks to pilot job
        task_set_A = list()
        for i in range(NUMBER_JOBS):
            # -------- BEGIN USER DEFINED TASK 1 DESCRIPTION --------- #
            task_desc = pilot.ComputeUnitDescription()
            task_desc.executable = '/bin/echo'
            task_desc.arguments = ['I am an $TASK_SET task with id $TASK_NO', ]
            task_desc.environment = {'TASK_SET': 'A', 'TASK_NO': i}
            task_desc.spmd_variation = 'single'
            task_desc.number_of_processes = 1
            task_desc.output = 'A-stdout.txt'
            task_desc.error = 'A-stderr.txt'
            # -------- END USER DEFINED TASK 1 DESCRIPTION --------- #
            # Submit task to PilotJob
            task = pilotjob.submit_compute_unit(task_desc)
            print "* Submitted 'A' task '%s' with id '%s'" % (i, task.get_id())
            task_set_A.append(task)
        # Chaining tasks i.e submit a compute unit, when compute unit from A is successfully executed.
        # A 'B' task reads the content of the output file of an 'A' task and writes it into its own
        # output file.
        # NOTE(review): this is a busy-wait loop (no sleep between polls).
        task_set_B = list()
        while len(task_set_A) > 0:
            for a_task in task_set_A:
                if a_task.get_state() == "Done":
                    print "One 'A' task %s finished. Launching a 'B' task." % (a_task.get_id())
                    # -------- BEGIN USER DEFINED TASK 2 DESCRIPTION --------- #
                    task_desc = pilot.ComputeUnitDescription()
                    task_desc.executable = '/bin/echo'
                    task_desc.arguments = ['I am a $TASK_SET task with id $TASK_NO', ]
                    # NOTE(review): TASK_NO is set to the task *object*, not an
                    # id -- presumably a_task.get_id() was intended; confirm.
                    task_desc.environment = {'TASK_SET': 'B', 'TASK_NO': a_task}
                    task_desc.spmd_variation = 'single'
                    task_desc.number_of_processes = 1
                    task_desc.output = 'B-stdout.txt'
                    task_desc.error = 'B-stderr.txt'
                    # -------- END USER DEFINED TASK 2 DESCRIPTION --------- #
                    # Submit task to Pilot Job
                    task = pilotjob.submit_compute_unit(task_desc)
                    # NOTE(review): `i` here is the stale counter from the 'A'
                    # loop above, so every 'B' line prints the same number.
                    print "* Submitted 'B' task '%s' with id '%s'" % (i, task.get_id())
                    task_set_B.append(task)
                    # removing while iterating relies on the outer while re-scan
                    task_set_A.remove(a_task)
        return(0)
    except Exception, ex:
        print "AN ERROR OCCURRED: %s" % ((str(ex)))
        # print a stack trace in case of an exception -
        # this can be helpful for debugging the problem
        traceback.print_exc()
        return(-1)
    finally:
        # alway try to shut down pilots, otherwise jobs might end up
        # lingering in the queue
        print ("Terminating BigJob...")
        pilotjob.cancel()
        pilot_compute_service.cancel()
if __name__ == "__main__":
    sys.exit(main()) | PypiClean
/Art3dUtils-0.3.7.tar.gz/Art3dUtils-0.3.7/art3dutils/utilities.py |
import __builtin__
import os
import locale
import sys
import math
import re
import time
import datetime
import mako.template
import pkg_resources
import xml
from xml.dom.minidom import parse, parseString
#TODO fix this import
#from scroll.utils import pad_box
RANGE_SEPARATOR = '_'
GROUP_SEPARATOR = '-'
GROUP_RANGE_SEPARATOR = ','
SVG_PATH_COMMANDS = 'MLHVCSQTAZ'
MAX_NODES = 200
OUTLINES_CONTAINER_ID = 'SELECTION'
POLYGON_NODES = ['polygon', 'path', 'polyline', 'rect']
GARBAGE_ATTRIBUTES = ['fill', 'stroke', 'style', 'display']
SQ = u'²'
GROUP_DICT = {'n': 'number',
'p': 'pl',
'f': 'floor_number',
's': 'section_number',
'b': 'building_number',
'q': 'quarter_number',
'v': 'variant_number',
't': 'type',
'a': 'mezzanine_number'}
#This dictionary helps translate xml tags generated by 1S (http://www.1c.ru/)
#into valid xml
DICTIONARY_1S = {
'output': u'Выгрузка',
'building_type': u'd2p1:ТипНедвижимости',
'title': ['xmlns:d2p1', 'xmlns:d3p1'],
'building_object': u'd3p1:ОбъектСтроительства',
'apartment': u'd4p1:ОбъектНедвижимости',
'number': 'xmlns:d4p1',
'data': u'РеквизитыОбъектаНедвижимости',
'phase': u'Фаза',
'building': u'Корпус',
'section': u'Секция',
'floor': u'Этаж',
'room_count': u'КоличествоКомнат',
'square': u'Площадь',
'project_no': u'НомерПроектный',
'total_cost': u'Стоимость',
'cost_per_meter': u'Цена',
'status': u'ДоступностьКпродаже',
}
ALPHA = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
'Z']
CYR_INDEX = ['', u'А', u'Б', u'В', u'Г', u'Д', u'Е']
class CellValueError(Exception):
    """Raised when an xls cell cannot be coerced to the requested type."""

    def __init__(self, cell_value=None, row_number=None, sheet_name=None,
                 column_number=None):
        details = (cell_value, sheet_name, row_number, column_number)
        message = 'Cell value error: "%s" (Sheet "%s", Row %s, Column #%s)' % details
        print(message)  # echoed so batch imports show the offending cell
        Exception.__init__(self, message)
class AttrValueError(Exception):
    """Raised when an svg node attribute cannot be parsed."""

    def __init__(self, attr_value=None):
        Exception.__init__(self, 'Node attribute error: "%s"' % attr_value)
class Path(object):
    """Accumulates absolute coordinate pairs while walking an svg path.

    `current_point` is the pen position; `coord_pairs` the list of visited
    (x, y) points.
    """

    def __init__(self):
        self.current_point = None
        self.coord_pairs = []

    def update(self, new_point, append=True):
        """Move the pen to `new_point`, recording it unless `append` is False."""
        self.current_point = new_point
        if append:
            self.coord_pairs.append(self.current_point)

    def move_to(self, x=0.0, y=0.0):
        """Absolute move (svg `M`)."""
        self.update((x, y))

    def line_to(self, x=None, y=None, relative=True):
        """Line to a new point; a coordinate left as None means "no change".

        Bug fix: coordinates are now tested with `is None` instead of
        truthiness, so an explicit 0 (e.g. svg `H0`/`V0` in absolute mode)
        is honoured rather than being treated as "not given".
        """
        if relative:
            if x is None:
                x = 0.0
            if y is None:
                y = 0.0
            x, y = (self.current_point[0] + float(x),
                    self.current_point[1] + float(y))
        else:
            if x is None:
                x = self.current_point[0]
            if y is None:
                y = self.current_point[1]
        self.update((x, y))
def mass_replace(string, word_dict):
    """Replace occurrences of each dict *value* in `string` with its *key*.

    A value may also be a list of variants, each of which is replaced by
    the key.  Returns the changed text.
    (Docstring fixed: the previous one described the mapping backwards.)
    """
    for key in word_dict:
        try:
            string = string.replace(word_dict[key], key)
        except TypeError:
            # value is a list of variants rather than a single string
            for variant in word_dict[key]:
                string = string.replace(variant, key)
    return string
class Timer(object):
    """Context manager measuring the wall-clock duration of a `with` block."""

    def __enter__(self):
        self._t0 = time.time()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # no error handling: exceptions propagate, only the end time is noted
        self._t1 = time.time()

    def duration_in_seconds(self):
        """Return seconds elapsed between entering and leaving the block."""
        return self._t1 - self._t0
def progressbar(iterator, prefix='', size=30, mute=False):
    """Yield items from `iterator`, drawing a stdout progressbar with an
    estimated time to completion based on the last item's duration."""
    count = len(iterator)
    timer = Timer()

    def _render(done, seconds_per_item):
        if mute:
            return
        filled = int(size * done / count)
        remaining = str(datetime.timedelta(
            seconds=seconds_per_item * (count - done)))
        sys.stdout.write("\r%s[%s%s] %i/%i EST: %s"
                         % (prefix, "#" * filled, "." * (size - filled),
                            done, count, remaining))
        sys.stdout.flush()

    _render(0, 0)
    for done, item in enumerate(iterator, start=1):
        with timer:
            yield item
        _render(done, timer.duration_in_seconds())
    # final carriage-return/newline is written even when muted (as before)
    sys.stdout.write("\r \r\n")
def strip_string(string):
    """Trim quotes/whitespace from both ends and collapse inner whitespace."""
    return ' '.join(string.strip(' \t\n\r"').split())
def strip_alpha(string, except_=None):
    """Strip every char except digits, '.', ',' and any chars in `except_`.

    `except_` may be a string or iterable of extra characters to keep.
    Fix: the extra characters are now `re.escape`d, so regex
    metacharacters (']', '-', '^', ...) are treated literally instead of
    corrupting the character class.
    """
    except_str = ''
    if except_:
        except_str = re.escape(''.join(list(except_)))
    alpha = re.compile(r'[^\d.,%s]+' % except_str)
    return alpha.sub('', string)
def parse_shorthand(shorthand_str):
    """Parse a string / file path for grouped numbers and ranges.

    Returns a dict keyed according to GROUP_DICT, e.g.
    "f1_3" -> {'floor_number': [1, 2, 3]}.

    Fixes: raise ValueError instead of the overly-broad BaseException
    (ValueError is still caught by any existing `except BaseException`
    handlers), and compare lengths with `==` instead of the identity
    operator `is`, which only worked via CPython small-int caching.
    """
    from itertools import chain
    result = {}
    clean_name = shorthand_str
    groups = re.split(GROUP_SEPARATOR, clean_name)
    for group in groups:
        group_values = list(group)
        # read and remove the one-letter group identifier
        group_id = group_values.pop(0)
        group_val_str = ''.join(group_values)
        # process comma-separated ranges
        ranges = re.split(GROUP_RANGE_SEPARATOR, group_val_str)
        group_lists = []
        for range_str in ranges:
            range_ = re.split(RANGE_SEPARATOR, range_str)
            try:
                if len(range_) == 1:
                    range_ = [int(range_[0])]
                if len(range_) == 2:
                    # inclusive range, e.g. "1_3" -> [1, 2, 3]
                    range_ = range(int(range_[0]), int(range_[1])+1)
            except ValueError:
                raise ValueError(
                    'Incorrect value in shorthand '
                    'record "{record}"'.format(record=shorthand_str))
            group_lists.append(range_)
        try:
            result[GROUP_DICT[group_id]] = list(chain.from_iterable(group_lists))
        except KeyError:
            raise ValueError(
                'Incorrect key in shorthand '
                'record "{record}"'.format(record=shorthand_str))
    return result
def close_path(points):
    """Ensure the coordinate string ends where it starts.

    Appends the first coord pair to the end if it is not already there --
    useful for unclosed paths coming out of svg.
    """
    pairs = points.split(' ')
    if pairs[0] != pairs[-1]:
        pairs.append(pairs[0])
    return ' '.join(pairs)
def process_value(value, enforce_type='int', default=None):
    """'Safely' coerce `value` to the builtin type named by `enforce_type`,
    returning `default` when coercion fails.

    Strings headed for numeric types are cleaned (whitespace/quotes and
    alpha chars stripped, ',' decimal marks normalised to '.').
    Fix: the comparisons `enforce_type is 'int'` relied on CPython string
    interning; they are now proper equality tests.
    NOTE(review): Python 2 only (`unicode`, `__builtin__`).
    """
    result = default
    try:
        if type(value) in (str, unicode):
            # prepare strings for numeric types
            if enforce_type in ('int', 'float'):
                value = strip_string(value)
                value = strip_alpha(value)
                value = value.replace(',', '.')
            if enforce_type == 'int':
                try:
                    # go through float so "12.0" still lands as int 12
                    value = float(value.strip('.'))
                except ValueError:
                    pass
        if value is not None:
            type_to_enforce = getattr(__builtin__, enforce_type)
            result = type_to_enforce(value)
    except ValueError:
        pass
    return result
def read_attr(node, attr_name, enforce_type='int', default=0):
    """Read an xml node attribute, coerced via `process_value`.

    Fix: the ValueError fallback previously assigned `default` and then
    fell off the end of the function (returning None); it now returns
    the default as intended.
    NOTE(review): `type(value) is not enforce_type` compares a type
    object with a type-*name* string, so it is always True and every
    attribute goes through `process_value`; preserved as-is to avoid a
    behaviour change.
    """
    value = node.getAttribute(attr_name)
    if type(value) is not enforce_type:
        try:
            return process_value(value, enforce_type, default)
        except ValueError:
            value = default
        return value
    else:
        return value
def read_cell(sheet, row_num, col_num, enforce_type='int', default=None,
              raise_exception=False):
    """Read an xls cell via `process_value`; on coercion failure either
    raise CellValueError (when `raise_exception`) or return `default`."""
    cell_value = sheet.cell_value(row_num, col_num)
    try:
        return process_value(cell_value, enforce_type, default)
    except ValueError:
        if not raise_exception:
            return default
        raise CellValueError(cell_value, row_num, sheet.name, col_num)
def process_node(node):
    """Return a closed coordinate string for a supported svg shape node,
    or None for an unsupported tag."""
    tag = node.nodeName
    points = None
    if tag in ('polygon', 'polyline'):
        points = strip_string(node.getAttribute('points'))
    elif tag == 'rect':
        points = process_rectangle_node(node)
    elif tag == 'path':
        points = process_path_node(node)
    if not points:
        return points
    return close_path(points)
def process_rectangle_node(node):
    """Convert a `rect` tag into a closed polygon coordinate string."""
    x = float(node.getAttribute('x'))
    y = float(node.getAttribute('y'))
    x2 = x + float(node.getAttribute('width'))
    y2 = y + float(node.getAttribute('height'))
    # corners clockwise from top-left, repeating the first to close the loop
    corners = [(x, y), (x2, y), (x2, y2), (x, y2), (x, y)]
    return ' '.join('%s,%s' % (str(cx), str(cy)) for cx, cy in corners)
def clean_nodes(nodes, allowed_nodes=POLYGON_NODES):
    """Filter `nodes`, keeping only elements whose tag is in `allowed_nodes`."""
    return [node for node in nodes if node.nodeName in allowed_nodes]
def remove_child_nodes(parent, except_id=None):
    """Remove all child nodes from parent except for that with `except_id`.

    `parent.childNodes` is a *live* NodeList: removing nodes while the
    `for` iterates it makes the iterator skip every other node, which is
    why the outer `while` keeps re-scanning until the list is empty.
    The node matching `except_id` is removed like the rest and re-appended
    afterwards, so it ends up as the sole (last) child.
    """
    skipped_node = None
    victims = parent.childNodes
    #TODO investigate this
    while len(victims) > 0:
        for node in victims:
            if except_id:
                try:
                    # text/comment nodes have no getAttribute -> AttributeError
                    if node.getAttribute('id') == except_id:
                        skipped_node = node
                except AttributeError:
                    pass
            parent.removeChild(node)
    if skipped_node:
        parent.appendChild(skipped_node)
def process_path_node(node):
    """Get a coordinate string from a `path` node.

    Only the first sub-path (up to the first 'z') is processed.  Curve
    commands ('c'/'C') are flattened to their end points, so curved paths
    come out as straight segments.  If the node carries a `transform`
    attribute, the parsed matrix is applied to every resulting point.
    NOTE(review): `parse_transform` / `apply_matrix_to_point` are defined
    elsewhere in this module (not visible here); the `assert ... is N`
    length checks rely on CPython small-int caching and on asserts not
    being stripped with -O.
    """
    d = node.getAttribute('d')
    matrix = None
    try:
        transform = node.getAttribute('transform')
        matrix = parse_transform(transform)
    except AttributeError:
        pass
    paths = d.split('z')
    # one match per command letter plus its argument blob
    pattern = re.compile('([{com}]?[^{com}]+)'.format(com=SVG_PATH_COMMANDS),
                         re.IGNORECASE)
    #for now we're interested in first path only
    commands = pattern.findall(paths[0])
    path = Path()
    for command in commands:
        cmd_list = list(command)
        action = str(cmd_list.pop(0))
        command = ''.join(cmd_list)
        # numbers separated by commas/whitespace; a leading '-' sticks to its number
        coord_pattern = re.compile('([\-]?[^\-,\s]+)')
        coords = coord_pattern.findall(command.strip())
        coords = [float(coord) for coord in coords]
        #TODO rewrite with dict
        if action == 'M':
            assert len(coords) is 2
            path.move_to(x=coords[0], y=coords[1])
        if action == 'h':
            assert len(coords) is 1
            path.line_to(x=coords[0])
        if action == 'H':
            assert len(coords) is 1
            path.line_to(x=coords[0], relative=False)
        if action == 'v':
            assert len(coords) is 1
            path.line_to(y=coords[0])
        if action == 'V':
            assert len(coords) is 1
            path.line_to(y=coords[0], relative=False)
        if action == 'l':
            assert len(coords) is 2
            path.line_to(x=coords[0], y=coords[1])
        if action == 'L':
            assert len(coords) is 2
            path.line_to(x=coords[0], y=coords[1], relative=False)
        if action == 'c':
            # cubic bezier: keep only the end point (coords[4], coords[5])
            assert len(coords) is 6
            path.line_to(x=coords[4], y=coords[5])
        if action == 'C':
            assert len(coords) is 6
            path.line_to(x=coords[4], y=coords[5], relative=False)
    if matrix:
        new_pairs = []
        for pair in path.coord_pairs:
            new_pair = apply_matrix_to_point(pair, matrix)
            new_pairs.append(new_pair)
        path.coord_pairs = new_pairs
    return ' '.join('%s,%s' % pair for pair in path.coord_pairs)
def get_outline_container(dom_, skip_id_check=False):
    """Return the outline container `g` element of an svg dom, or None.

    To qualify, the *first* `g` element must carry an `id` equal to
    OUTLINES_CONTAINER_ID (unless `skip_id_check` is set).
    """
    first_g = dom_.getElementsByTagName('g')[0]
    if skip_id_check or first_g.getAttribute('id') == OUTLINES_CONTAINER_ID:
        return first_g
    return None
def remove_duplicates(points):
    """Drop repeated coordinate pairs, keeping first-seen order."""
    seen = []
    for pair in points.split(' '):
        if pair not in seen:
            seen.append(pair)
    return ' '.join(seen)
def get_polygon_nodes(file_path=None, dom_=None, skip_id_check=False,
                      skip_count_check=False):
    """Return polygon nodes found in an svg file path, string, or dom.

    Each returned node is annotated with the svg viewBox metrics
    (margin_x/margin_y/width_/height_) for later margin removal.
    NOTE(review): if both parse attempts fail, only 'Bad xml' is printed
    and `dom_` stays None, so the get_svg_viewbox call below crashes with
    AttributeError; likewise `len(outline_nodes)` raises TypeError when
    no outline container is found -- confirm whether callers rely on that.
    """
    if not dom_:
        try:
            # treat file_path as a path first ...
            dom_ = parse(file_path)
        except IOError:
            try:
                # ... and fall back to treating it as raw svg data
                dom_ = parseString(file_path)
            except xml.parsers.expat.ExpatError:
                print('Bad xml: %s' % file_path)
    outline_nodes = None
    view_box = get_svg_viewbox(dom_)
    outline_container = get_outline_container(dom_, skip_id_check)
    if outline_container:
        outline_nodes = clean_nodes(outline_container.childNodes)
        for node in outline_nodes:
            node.margin_x = view_box['margin_x']
            node.margin_y = view_box['margin_y']
            node.width_ = view_box['width']
            node.height_ = view_box['height']
    nodes_count = len(outline_nodes)
    if not skip_count_check:
        assert nodes_count <= MAX_NODES, 'too many nodes(%d) in %s' % \
                                         (nodes_count, file_path)
    return outline_nodes
def get_text_nodes(file_path=None, dom_=None):
    """Return `text` nodes inside the outline container of an svg
    (given as file path, raw string, or pre-parsed dom)."""
    if not dom_:
        try:
            dom_ = parse(file_path)
        except IOError:
            dom_ = parseString(file_path)
    container = get_outline_container(dom_)
    return clean_nodes(container.childNodes, ('text',))
def get_svg_viewbox(dom):
    """Read margin and size out of the `viewBox` attribute of the svg root."""
    svg_node = dom.getElementsByTagName('svg')[0]
    parts = svg_node.getAttribute('viewBox').split(' ')
    return {'margin_x': float(parts[0]),
            'margin_y': float(parts[1]),
            'width': float(parts[2]),
            'height': float(parts[3])}
def node2svg(node):
    """Render `node` through the packaged `svg.mako` template."""
    raw_template = pkg_resources.resource_string(__name__, 'svg.mako')
    return mako.template.Template(raw_template).render(node=node)
def fix_pdf(file_path):
    """Append the PDF end-of-file marker to a (truncated) pdf.

    Bug fix: the marker written used to be '%%EOF%%'; per the PDF spec
    (ISO 32000-1, section 7.5.5) the file must end with the comment
    '%%EOF'.  A leading newline guarantees the marker starts on its own
    line regardless of how the file was truncated.
    """
    with open(file_path, 'a') as f:
        f.write('\n%%EOF\n')
def get_options(*args):
    """Provide some common optparse input options for a script.

    Each name in `args` must be a key of OPTIONS_DICT; the matching
    option is registered and the parsed `options` object is returned.

    Bug fix: post-processing used `hasattr(options, 'subject')`
    (singular), which never matched the `subjects` dest, so the
    comma-splitting of --subjects never ran.  It now checks the real
    attribute and guards against the None default.
    """
    OPTIONS_DICT = {
        'all_available': {
            'short_key': '-a',
            'long_key': '--all-available',
            'default': False,
            'help_string': 'Make all flats available for sale',
            'metavar': 'ALL_AVAILABLE',
        },
        'limit': {
            'short_key': '-l',
            'long_key': '--limit',
            'default': None,
            'help_string': 'Limit number of output to',
            'metavar': 'LIMIT',
        },
        'shorthand': {
            'short_key': '-s',
            'long_key': '--shorthand',
            'default': dict(),
            'help_string': 'Ranges of values in shorthand',
            'metavar': 'SHORTHAND',
        },
        'dry_run': {
            'short_key': '-d',
            'long_key': '--dry_run',
            'default': None,
            'help_string': 'Perform a dry run',
            'metavar': 'DRY_RUN'
        },
        'subjects': {
            'short_key': '-w',
            'long_key': '--subjects',
            'default': None,
            'help_string': 'What to generate, separated with commas',
            'metavar': 'SKIP_MISSING'
        }
    }
    from optparse import OptionParser
    parser = OptionParser()
    for option in args:
        assert option in OPTIONS_DICT, 'Unknown option "%s"!' % option
        option_dict = OPTIONS_DICT[option]
        parser.add_option(option_dict['short_key'],
                          option_dict['long_key'], dest=option,
                          default=option_dict['default'],
                          help=option_dict['help_string'],
                          metavar=option_dict['metavar'])
    (options, args) = parser.parse_args()
    # split the comma-separated --subjects value into a list (skip the
    # None default so scripts without -w keep working)
    if getattr(options, 'subjects', None) is not None:
        options.subjects = options.subjects.split(',')
    return options
def create_dirs_in_path(path):
    """Recursively create every directory component of `path`."""
    import errno
    directory = os.path.dirname(path)
    try:
        os.makedirs(directory)
    except OSError as exc:
        # an already-existing directory is fine; anything else propagates
        if exc.errno != errno.EEXIST:
            raise
def simple_multiprocess(callable_, args, processes=2):
    """Map `callable_` over `args` with a multiprocessing worker pool.

    Fix: the pool is now closed and joined after the map, so worker
    processes are reaped instead of leaking until interpreter exit.
    """
    from multiprocessing import Pool
    pool = Pool(processes=processes)
    try:
        return pool.map(callable_, args)
    finally:
        pool.close()
        pool.join()
def area_coords(node):
    """Return the node's polygon coordinates in html `<area>` format
    (all comma-separated, margins subtracted), or None when the node is
    not a supported shape."""
    coords = process_node(node)
    try:
        shifted = []
        for pair in re.split(' ', coords):
            x, y = re.split(',', pair)
            margin_x = 0
            margin_y = 0
            try:
                margin_x = float(node.margin_x)
                margin_y = float(node.margin_y)
            except AttributeError:
                pass  # node carries no margin info; leave coordinates as-is
            shifted.append('%d,%d' % (float(x) - margin_x,
                                      float(y) - margin_y))
        return ','.join(shifted)
    except TypeError:
        # process_node returned None for an unsupported node
        return None
def set_style_value(node, style_name, style_value):
    """Set a `name:value` record in the node's `style` attribute, removing
    any standalone attribute of the same name.

    Bug fix: when the node already had a `style` attribute that did NOT
    contain `style_name`, the new record was silently dropped; it is now
    appended.
    NOTE(review): values containing ':' (e.g. `fill:url(#x)`) would break
    the split below -- confirm such styles never occur in the input svgs.
    """
    style_value = str(style_value)
    styles_str = node.getAttribute('style')
    new_styles = []
    if styles_str:
        replaced = False
        for style in styles_str.split(';'):
            if style:
                name, value = style.split(':')
                if name == style_name:
                    style = ':'.join([name, style_value])
                    replaced = True
                new_styles.append(style)
        if not replaced:
            new_styles.append(':'.join([style_name, style_value]))
        # drop a standalone attribute of the same name in favour of `style`
        if node.hasAttribute(style_name):
            node.removeAttribute(style_name)
    else:
        new_styles.append(':'.join([style_name, style_value]))
    node.setAttribute('style', ';'.join(new_styles))
def make_opaque(path=None, data=None):
    """Set opacity to 1 on every `g` node of an svg (given as a file path
    or as raw data) and return the document as an xml string."""
    dom_ = None
    if path:
        dom_ = parse(path)
    if data:
        # raw data wins when both arguments are supplied (as before)
        dom_ = parseString(data)
    for group_node in dom_.getElementsByTagName('g'):
        set_style_value(group_node, 'opacity', 1)
    return dom_.toxml(encoding='utf-8')
def get_text_coords(node):
    """Return the (x, y) translation part of a text node's `transform`."""
    # the attribute looks like "matrix(1 0 0 1 400.3262 253.96)";
    # strip_alpha leaves only the numeric fields separated by spaces
    values = strip_alpha(node.getAttribute('transform'), except_=' ').split(' ')
    return float(values[4]), float(values[5])
def prepare_outline_svg(dom_):
    """Strip an svg dom down to the outline container, normalising styling.

    Removes every `g` other than OUTLINES_CONTAINER and deletes per-node
    styling attributes (`fill`, `stroke`, ...) from the outline nodes in
    favour of a single `style` attribute, for `pycairo` compatibility.
    """
    svg_node = dom_.getElementsByTagName('svg')[0]
    remove_child_nodes(svg_node, except_id=OUTLINES_CONTAINER_ID)
    container = dom_.getElementsByTagName('g')[0]
    for outline_node in container.childNodes:
        for attr_name in GARBAGE_ATTRIBUTES:
            try:
                if outline_node.hasAttribute(attr_name):
                    outline_node.removeAttribute(attr_name)
            except AttributeError:
                pass  # text/comment nodes carry no attributes
def best_fit(context, box_dim=(0, 0), svg_dim=(0, 0), start_coords=(0, 0)):
    """Scale `context` so the svg best-fits the box, centred both ways.

    Credit: http://stackoverflow.com/a/5514344/216042
    """
    box_width, box_height = box_dim
    svg_width, svg_height = svg_dim
    x_start, y_start = start_coords
    # uniform scale: the tighter of the two aspect ratios wins
    scale = min(float(box_width) / svg_width,
                float(box_height) / svg_height)
    x_margin = (box_width - svg_width * scale) / 2
    y_margin = (box_height - svg_height * scale) / 2
    context.translate(x_start + x_margin, y_start + y_margin)
    context.scale(scale, scale)
def pad_box(canvas_size=(100, 100), padding=(0, 0, 0, 0)):
    """
    Pad the box (cut the size and pad coords) and return new dimensions
    and coords as tuple of tuples
    """
    width, height = canvas_size
    # CSS order: top, right, bottom, left
    top, right, bottom, left = padding
    return (width - right - left, height - top - bottom), (left, top)
def write_scaled_svg(dom_, size, filename, padding):
    """Write scaled svg file with pycairo and rsvg"""
    import rsvg, cairo
    #TODO try to rewrite without file generation
    surface = cairo.SVGSurface(filename, size[0], size[1])
    context = cairo.Context(surface)
    handle = rsvg.Handle(data=dom_.toxml('utf-8'))
    # get_dimension_data() -> (width, height, em-width, em-height)
    width, height, _em_w, _em_h = handle.get_dimension_data()
    box_dim, box_coords = pad_box(size, padding)
    best_fit(context, box_dim, (width, height), start_coords=box_coords)
    handle.render_cairo(context)
    surface.finish()
def mass_set_style(attribs, svg_path=None, svg_data=None,
                   tag_names=('line', 'polygon', 'circle', 'polyline',
                              'rect', 'path')):
    """Set style defined in `attribs` dict for particular tags in svg."""
    # `svg_data` wins when both are supplied
    if svg_path:
        dom_ = parse(svg_path)
    if svg_data:
        dom_ = parseString(svg_data)
    for tag in tag_names:
        for node in dom_.getElementsByTagName(tag):
            for style_name, style_value in attribs.items():
                set_style_value(node, style_name, style_value)
    return dom_.toxml(encoding='utf-8')
def mass_set_attr(attribs, svg_path=None, svg_data=None,
                  tag_names=('line', 'polygon', 'circle', 'polyline',
                             'rect', 'path')):
    """Set attribute defined in `attribs` dict for particular tags in svg."""
    # `svg_data` wins when both are supplied
    if svg_path:
        dom_ = parse(svg_path)
    if svg_data:
        dom_ = parseString(svg_data)
    for tag in tag_names:
        for node in dom_.getElementsByTagName(tag):
            # wipe any inline style first, then set plain attributes
            node.setAttribute('style', '')
            for attr_name, attr_value in attribs.items():
                node.setAttribute(attr_name, str(attr_value))
    return dom_.toxml(encoding='utf-8')
def set_svg_attribs(attribs, svg_path=None, svg_data=None):
    """Set attributes for an SVG element"""
    # `svg_data` wins when both are supplied
    if svg_path:
        dom_ = parse(svg_path)
    if svg_data:
        dom_ = parseString(svg_data)
    root = dom_.getElementsByTagName('svg')[0]
    for attr_name, attr_value in attribs.items():
        root.setAttribute(attr_name, str(attr_value))
    return dom_.toxml(encoding='utf-8')
def remove_outlines(svg_path=None, svg_data=None):
    """Remove outlines container identified by OUTLINES_CONTAINER_ID"""
    # `svg_data` wins when both are supplied
    dom_ = parse(svg_path) if svg_path else None
    if svg_data:
        dom_ = parseString(svg_data)
    container = get_outline_container(dom_)
    dom_.documentElement.removeChild(container)
    return dom_.toxml(encoding='utf-8')
def apply_matrix_to_point(point, matrix=(1, 0, 0, 1, 0, 0)):
    """Return a point (tuple) with applied transform matrix"""
    x, y = point
    a, b, c, d, e, f = matrix
    # standard 2D affine transform: [a c e; b d f] * [x y 1]^T
    return (a * x + c * y + e,
            b * x + d * y + f)
def parse_transform(transform_string):
    """
    Parse `transform` attribute into matrix tuple
    Credit: simpletransform.py

    Returns a 6-tuple for recognized transforms, or None for an
    unrecognized transform type. Raises AttributeError when the string
    does not match the transform pattern at all.
    """
    # raw string avoids the invalid-escape-sequence warning for "\("
    result = re.match(r"(translate|scale|rotate|skewX|skewY|matrix)\(([^)]*)\)",
                      transform_string)
    matrix = None
    kind = result.group(1)
    raw_args = result.group(2)
    #-- translate --
    if kind == "translate":
        args = raw_args.split(",")
        dx = float(args[0])
        dy = float(args[1]) if len(args) > 1 else 0.0
        matrix = (1, 0, dx, 0, 1, dy)
    #-- scale --
    elif kind == "scale":
        args = raw_args.split(",")
        sx = float(args[0])
        # a single argument means uniform scaling
        sy = float(args[1]) if len(args) > 1 else sx
        matrix = (sx, 0, 0, 0, sy, 0)
    #-- rotate --
    elif kind == "rotate":
        args = raw_args.split(",")
        a = float(args[0]) * math.pi / 180
        if len(args) == 1:
            cx, cy = (0.0, 0.0)
        else:
            # BUG FIX: cx/cy were left as strings, which broke any
            # numeric use of the resulting matrix
            cx, cy = (float(v) for v in args[1:])
        matrix = (math.cos(a), -math.sin(a), cx, math.sin(a), math.cos(a), cy)
    #-- skewX --
    elif kind == "skewX":
        a = float(raw_args) * math.pi / 180
        matrix = (1, math.tan(a), 0, 0, 1, 0)
    #-- skewY --
    elif kind == "skewY":
        # BUG FIX: this branch previously tested "skewX" twice, so skewY
        # always returned None and skewX got the skewY matrix
        a = float(raw_args) * math.pi / 180
        matrix = (1, 0, 0, math.tan(a), 1, 0)
    #-- matrix --
    elif kind == "matrix":
        a, b, c, d, e, f = raw_args.split(",")
        matrix = (float(a), float(b), float(c), float(d), float(e), float(f))
    return matrix
def to_list(value):
    """
    Return value as a list. If the value is not iterable -
    return a list with a single element
    """
    # BUG FIX: the original tested `type(value) not in [str, int, unicode]`,
    # which raises NameError on Python 3 (`unicode` is undefined) and
    # crashes with TypeError for non-iterable scalars such as None/float.
    if isinstance(value, (str, bytes, int, float)):
        return [value]
    try:
        return list(value)
    except TypeError:
        # non-iterable scalar: wrap it
        return [value]
def sort_nicely(list_):
    """
    Sort the given list in the way that humans expect.
    Credit: http://nedbatchelder.com/blog/200712/human_sorting.html
    (comment by Toothy)

    Sorts in place; returns None.
    """
    def _chunk(text):
        # digit runs compare numerically, the rest lexically
        return int(text) if text.isdigit() else text

    def _natural_key(key):
        return [_chunk(part) for part in re.split('([0-9]+)', key)]

    list_.sort(key=_natural_key)
def localize(value, monetary=False, locale_str='ru_RU.UTF-8', decoded=True,
             replace_utf_space=True, default=0):
    """Return localized value

    Falsy `value` is replaced with `default` before formatting.
    Raises locale.Error when `locale_str` is not available on the system.
    """
    if not value:
        value = default
    locale.setlocale(locale.LC_ALL, locale_str)
    # BUG FIX: locale.format() was deprecated and removed in Python 3.12;
    # locale.format_string() is the supported equivalent.
    result = locale.format_string('%g', value, grouping=True,
                                  monetary=monetary)
    # BUG FIX: on Python 3 the result is already str; only decode bytes
    # (Python 2 behavior) to avoid AttributeError.
    if decoded and isinstance(result, bytes):
        result = result.decode('utf-8')
    if replace_utf_space:
        # grouping may insert non-breaking spaces; normalize them
        result = result.replace(u'\xa0', ' ')
    return result
def in_shorthand(apt_dict, shorthand_list, **additional):
    """Return `True` if the apartment_dict fits in any of shorthand strings

    Any keyword arguments in `additional` are merged into `apt_dict`
    (mutating it) before the checks run.
    """
    shorthand_list = to_list(shorthand_list)
    record_checks = []
    for name, value in additional.items():
        apt_dict[name] = value
    for shorthand in shorthand_list:
        checks = []
        parsed_data = parse_shorthand(shorthand)
        # BUG FIX: .iteritems() is Python 2 only; .items() works everywhere
        for name, range_ in parsed_data.items():
            try:
                attr_val = apt_dict.get(name)
            except AttributeError:
                # apt_dict is not dict-like for this attribute
                raise AttributeError('Attribute `{attr_name}`'
                                     ' not provided'.format(
                                         attr_name=name))
            checks.append(attr_val in range_)
        record_checks.append(all(checks))
    return any(record_checks)
def pick_file(apt_dict, directory, path=True, **additional):
    """
    Find an appropriate file in a directory and return its name or path.

    Returns None when no file in `directory` matches.
    """
    for filename in os.listdir(directory):
        clean_name = os.path.splitext(os.path.basename(filename))[0]
        if in_shorthand(apt_dict, clean_name, **additional):
            return os.path.join(directory, filename) if path else filename
    return None
from __future__ import print_function, unicode_literals
try:
unicode
except NameError:
basestring = unicode = str # Python 3
import datetime
import rdflib
from xml.sax._exceptions import SAXParseException
import logging
from rdflib import compare
from ldtools.backends import RestBackend, ContentNegotiationError
from ldtools.resource import Resource
from ldtools.metamodels import Manager, Model
from ldtools.models import URIRefField, ObjectField
from ldtools.utils import (
get_rdflib_uriref, get_slash_url,
catchKeyboardInterrupt, is_valid_url, reverse_dict, safe_dict,
pyattr2predicate,
urllib2
)
from ldtools.helpers import my_graph_diff
logger = logging.getLogger(__name__)
class OriginManager(Manager):
    """Manager that creates and looks up Origin objects keyed by slash URI."""

    def post_create_hook(self, origin):
        """Hook applications can override to customize new origins,
        e.g. origin.timedelta = datetime.timedelta(minutes=2)."""
        return origin

    def create(self, uri, BACKEND=None):
        """Create an Origin for `uri`, normalizing it to a slash URI."""
        uri = get_rdflib_uriref(uri)
        slash_uri = get_slash_url(uri)
        if uri != slash_uri:
            logger.debug("URI passed to Origin Manager was not a slash "
                         "URI: %s. Fixed now." % uri)
            uri = slash_uri
        backend = BACKEND if BACKEND else RestBackend()
        origin = super(OriginManager, self).create(
            pk=uri, uri=uri,
            backend=backend)
        return self.post_create_hook(origin)

    def get(self, uri, **kwargs):
        """Retrieves Origin object from Store"""
        return super(OriginManager, self).get(pk=get_rdflib_uriref(uri))

    def get_or_create(self, uri, **kwargs):
        """Return (origin, created) for `uri`, normalizing to a slash URI."""
        uri = get_rdflib_uriref(uri)
        slash_uri = get_slash_url(uri)
        if uri != slash_uri:
            logger.warning("URI passed to Origin Manager was not a slash "
                           "URI: %s. Fixed now." % uri)
            uri = slash_uri
        try:
            if kwargs:
                logger.warning("kwargs are ignored for get.")
            return self.get(uri), False
        except self.model.DoesNotExist:
            return self.create(uri, **kwargs), True

    @catchKeyboardInterrupt
    def GET_all(self, depth=2, **kwargs):
        """Crawls or Re-Crawls all Origins. Passes Arguments to GET"""
        for _round in range(depth):
            pending = [o for o in self.all() if not o.processed]
            for origin in pending:
                origin.GET(raise_errors=False, **kwargs)
def triple_yield(resource, property, v):
    """Build the (subject, predicate, object) triple for one attribute
    value `v` of `resource`, coercing plain values to rdflib terms."""
    if isinstance(v, resource.__class__):
        # object is itself a Resource: "de-reference" to its URI
        return (resource._uri, property, v._uri)
    if not hasattr(v, "n3"):
        # newly added values without an rdflib type end up here
        # (hasattr guard: e.g. float has no attribute startswith)
        if hasattr(v, "startswith") and v.startswith("http://"):
            v = rdflib.URIRef(v)
        else:
            v = rdflib.Literal(v)
    return (resource._uri, property, v)
class Origin(Model):
    """A dereferenceable slash URI together with the backend used to fetch
    it and the state of the last retrieval (graph, errors, timings)."""

    uri = URIRefField()
    objects = OriginManager()
    backend = ObjectField()

    def add_error(self, error):
        """Record a retrieval/parsing error on this origin.

        The list is created lazily so hasattr(self, 'errors') doubles as
        "did any error occur?"."""
        if not hasattr(self, 'errors'):
            self.errors = []
        self.errors.append(error)

    def __init__(self, pk=None, **kwargs):
        super(Origin, self).__init__(pk=pk, **kwargs)
        # set to True once GET() successfully fetched and parsed the URI
        self.processed = False

    def __unicode__(self):
        extras = []
        if hasattr(self, 'errors'):
            for error in self.errors:
                extras.append(unicode(error))
        if self.processed:
            extras.append(u"Processed")
        return u" ".join([
            unicode(self.uri),
            self.backend.__class__.__name__,
        ] + extras)

    def GET(
        self,
        GRAPH_SIZE_LIMIT=30000,
        only_follow_uris=None,
        handle_owl_imports=False,
        raise_errors=True,
        skip_urls=None,
        httphandler=None,
    ):
        """Fetch self.uri via the backend, parse the result into a graph
        and populate Resource objects from it.

        Parameters
        ----------
        GRAPH_SIZE_LIMIT : int
            Maximum number of triples accepted from one resource.
        only_follow_uris : list | None
            If given, only objects of these predicates are registered as
            new origins to crawl.
        handle_owl_imports : bool
            Fetch owl:imports targets before continuing.
        raise_errors : bool
            Re-raise retrieval/parsing errors instead of only recording
            them via add_error().
        skip_urls : list | None
            utf8-encoded URLs that must not be fetched at all.
        httphandler
            Optional urllib2 handler passed through to the backend.
        """
        if not self.uri:
            raise Exception("Please provide URI first")

        if skip_urls is not None and self.uri.encode("utf8") in skip_urls:
            self.add_error("Skipped")
            self.processed = True
            return

        logger.info(u"GET %s..." % self.uri)

        if self.has_unsaved_changes():
            if self.processed:
                raise Exception("Please save all changes before querying "
                                "again. Merging not supported yet")
            else:
                logger.warning("There were Resource objects created before "
                               "processing the resource's origin.")

        now = datetime.datetime.now()
        # Throttle re-crawls: skip if a timedelta is configured and not
        # enough time has passed since the last processing.
        # self.timedelta = datetime.timedelta(minutes=1)
        if hasattr(self, "timedelta") and hasattr(self, 'last_processed'):
            time_since_last_processed = now - self.last_processed
            if (time_since_last_processed < self.timedelta):
                logger.info(
                    "Not processing %s again because was processed only %s ago" % (self.uri, time_since_last_processed))
                return
        self.last_processed = now

        try:
            data = self.backend.GET(self.uri, httphandler=httphandler)
        except urllib2.HTTPError as e:
            if e.code in [
                401,
                403,
                503,  # Service Temporarily Unavailable
                404,  # Not Found
            ]:
                self.add_error(e.code)
            if raise_errors:
                raise e
            else:
                return
        except urllib2.URLError as e:
            self.add_error("timeout")
            if raise_errors:
                raise e
            else:
                return
        except ContentNegotiationError as e:
            # str(e) instead of e.message: the .message attribute was
            # removed from exceptions in Python 3
            logger.error(str(e))
            if raise_errors:
                raise e
            else:
                return

        graph = rdflib.graph.ConjunctiveGraph(identifier=self.uri)

        try:
            if data:
                # Important: Do not pass data=data without publicID=uri because
                # relative URIs (#deri) won't be an absolute uri in that case!
                publicID = self.uri
                reference_time = datetime.datetime.now()
                graph.parse(data=data, publicID=publicID, format=self.backend.format)
                now = datetime.datetime.now()
                self.graph_parse_time = now - reference_time
                # normal rdflib.compare does not work correctly with
                # ConjunctiveGraph, unless there is only one graph within that
        except SAXParseException as e:
            self.add_error("SAXParseException")
            logger.error("SAXParseException: %s" % self)
            if raise_errors:
                raise e
            else:
                return
        except rdflib.exceptions.ParserError as e:
            self.add_error("ParserError")
            logger.error("ParserError: %s" % self)
            if raise_errors:
                raise e
            else:
                return
        except IOError as e:
            self.add_error("IOError")
            logger.error("IOError: %s" % self)
            if raise_errors:
                raise e
            else:
                return

        self.processed = True
        # a successful fetch clears previously recorded errors
        if hasattr(self, "errors"):
            delattr(self, "errors")

        g_length = len(graph)
        if g_length > 0:
            if len(list(graph.contexts())) > 1:
                # detect problems with graph contexts: rdflib can only
                # compare graphs with one context. If a graph has more
                # contexts this might result in wrong comparisons of graphs
                # Still ignored here as ldtools is more robust by doing so.
                logger.error("The graph has more than one context. This"
                             "might cause problems comparing the graphs!")
            if g_length > GRAPH_SIZE_LIMIT:
                logger.error("Maximum graph size exceeded. Thr graph is %s "
                             "triples big. Limit is set to %s. The aquired "
                             "graph exceeds that! Pass GRAPH_SIZE_LIMIT to set it "
                             "differently." % (g_length, GRAPH_SIZE_LIMIT))
                return

        if hasattr(self, "_graph"):
            # we already assured that there are no unsaved_changes
            # --> get_graph() == _graph
            logger.info(u"Already crawled: %s. Comparing graphs..." % self.uri)
            if compare.to_isomorphic(self._graph) ==\
               compare.to_isomorphic(graph):
                return
            else:
                # use the module logger (was the root `logging` module)
                logger.warning("GET retrieved updates for %s!" % self.uri)
                my_graph_diff(self._graph, graph)
                # updated content: throw away stale resources and reprocess
                for resource in self.get_resources():
                    resource.delete()
                delattr(self, "handled")

        if hasattr(self, "handled"):
            return

        self._graph = graph
        graph_handler = GraphHandler(
            only_follow_uris=only_follow_uris,
            handle_owl_imports=handle_owl_imports,
            origin=self)
        graph_handler.populate_resources(graph=graph)
        self.handled = True

    def get_graph(self):
        """Processes every Resource and Property related to 'self'"""
        # rdflib.ConjunctiveGraph because rdflib.Graph does not allow
        # usage of parsing plugins
        graph = rdflib.graph.ConjunctiveGraph(identifier=self.uri)

        if not hasattr(self, '_graph'):
            if hasattr(self, 'errors') and len(self.errors) != 0:
                # use the module logger (was the root `logging` module)
                logger.error("Origin %s has Errors --> can't process "
                             ".get_graph()" % self.uri)
                return graph

        assert hasattr(self, "_graph"), ("graph has to be processed before executing get_graph()")

        # Problems with namespacemapping here:
        # 1) namespace bindings are not really necessary to validate
        #    isomorphic graphs but the resulting graph is is different
        #    if they miss
        # 2) doesn't detect duplicate definitions of namespaces
        namespace_dict = safe_dict(dict(self._graph.namespace_manager.namespaces()))
        for prefix, namespace in safe_dict(namespace_dict).items():
            graph.bind(prefix=prefix, namespace=namespace)
        new_ns = dict(graph.namespace_manager.namespaces())
        assert namespace_dict == new_ns, [(k, v) for k, v in safe_dict(namespace_dict).items() if not k in safe_dict(new_ns).keys()]

        for resource in self.get_resources():
            # __dict__ converts rdflib.urirefs to strings for keys -->
            # convert back the dict's items back to uriref
            # {'foaf': 'http:/....', ...}
            for property, values in resource.__dict__.items():
                # skip internals
                if str(property).startswith("_") or property == "pk":
                    continue
                if property.startswith("http://"):
                    property = rdflib.URIRef(property)
                else:
                    property = pyattr2predicate(property, namespace_dict)
                assert isinstance(property, rdflib.URIRef), "property %s is not a URIRef object" % property
                if isinstance(values, set):
                    for v in values:
                        graph.add(triple_yield(resource, property, v))
                else:
                    graph.add(triple_yield(resource, property, values))
        return graph

    def get_resources(self):
        """Return all Resource objects attached to this origin."""
        return Resource.objects.filter(_origin=self)

    def has_unsaved_changes(self):
        """True when any attached resource carries unsaved attribute
        changes (resource._has_changes is True)."""
        return any(
            resource._has_changes
            for resource in self.get_resources()
            if (hasattr(resource, '_has_changes') and
                resource._has_changes is True)
        )

    def PUT(self):
        """Serialize the current graph and PUT it back via the backend."""
        assert self.processed
        if hasattr(self, "errors"):
            assert not self.errors, ("There were errors fetching the "
                                     "resource. PUT not possible")
        if not self.has_unsaved_changes():
            # use the module logger (was the root `logging` module)
            logger.error("Nothing to PUT for %s!" % self.uri)
            return
        graph = self.get_graph()
        data = graph.serialize(format=self.backend.format)
        self.backend.PUT(data=data)
        # everything persisted: clear the dirty flags
        for resource in Resource.objects.filter(_has_changes=True):
            resource._has_changes = False
        assert not self.has_unsaved_changes(), "something went wrong"
def check_shortcut_consistency():
    """Checks every known Origin for inconsistent namespacemappings"""
    # prefix -> namespace URI across every crawled origin
    global_namespace_dict = {}
    for origin in Origin.objects.all():
        if hasattr(origin, "_graph"):
            # NOTE(review): this unpacks (prefix, namespace) pairs straight
            # from safe_dict(...). If safe_dict returns a plain dict this
            # would iterate keys only and fail to unpack -- confirm
            # safe_dict's return type (a .items() call may be missing).
            for k, v in safe_dict(origin._graph.namespace_manager.namespaces()):
                if k in global_namespace_dict:
                    # same prefix must always map to the same namespace
                    assert global_namespace_dict[k] == v
                else:
                    global_namespace_dict[k] = v
class GraphHandler(object):
    """Walks a parsed rdflib graph and materializes Resource objects (and
    follow-up Origins) on behalf of one Origin."""

    def __init__(self, origin, only_follow_uris, handle_owl_imports):
        self.origin = origin
        self.handle_owl_imports = handle_owl_imports
        if only_follow_uris is not None:
            # normalize every entry to an rdflib.URIRef
            only_follow_uris = [
                u if isinstance(u, rdflib.URIRef) else rdflib.URIRef(u)
                for u in only_follow_uris
            ]
        self.only_follow_uris = only_follow_uris

    def populate_resources(self, graph):
        """Create a Resource per subject and register new Origins for
        followable object URIs found in `graph`."""
        # {"http://xmlns.com/foaf/0.1/": "foaf", ...}
        namespace_short_notation_reverse_dict = {
            unicode(rdflib_url): prefix
            for rdflib_url, prefix in reverse_dict(
                safe_dict(dict(graph.namespace_manager.namespaces()))
            ).items()
        }
        reference_time = datetime.datetime.now()
        for subject, predicate, obj_ect in graph:
            assert hasattr(subject, "n3")
            # workaround for rdflib's unicode problems
            assert predicate.encode('utf8')

            if (self.handle_owl_imports
                    and predicate == rdflib.OWL.imports
                    and type(obj_ect) == rdflib.URIRef):
                uri = get_slash_url(obj_ect)
                origin, created = Origin.objects.get_or_create(uri=uri)
                logger.info("Interrupting to process owl:imports %s"
                            "first" % (origin.uri))
                origin.GET()

            follow = (self.only_follow_uris is None
                      or predicate in self.only_follow_uris)
            if follow and type(obj_ect) == rdflib.URIRef:
                # wrong scheme mailto, tel, callto --> should be Literal?
                if is_valid_url(obj_ect):
                    Origin.objects.get_or_create(uri=get_slash_url(obj_ect))

            resource, _created = Resource.objects.get_or_create(
                uri=subject, origin=self.origin)
            resource._add_property(
                predicate, obj_ect, namespace_short_notation_reverse_dict)

        self.origin.graph_handler_time = (
            datetime.datetime.now() - reference_time)

        # freshly populated resources start out clean
        for resource in self.origin.get_resources():
            resource._has_changes = False
import glob
import time
import logging
import numpy as np
import os
import pandas as pd
from warnings import warn
from rex import Resource
from rex.utilities.utilities import (get_class_properties, parse_year,
get_lat_lon_cols)
from gaps.pipeline import parse_previous_status
from reV.handlers.outputs import Outputs
from reV.config.output_request import SAMOutputRequest
from reV.utilities.exceptions import HandlerRuntimeError, ConfigError
from reV.utilities import log_versions, ModuleName
logger = logging.getLogger(__name__)
class MultiYearGroup:
    """
    Handle group parameters
    """
    def __init__(self, name, out_dir, source_files=None,
                 source_dir=None, source_prefix=None,
                 source_pattern=None,
                 dsets=('cf_mean',), pass_through_dsets=None):
        """
        Parameters
        ----------
        name : str
            Group name. Can be ``"none"`` for no collection groups.
        out_dir : str
            Output directory - used for Pipeline handling.
        source_files : str | list, optional
            Explicit list of source files. Use either this input *OR*
            `source_dir` + `source_prefix`. If this input is
            ``"PIPELINE"``, the `source_files` input is determined from
            the status file of the previous pipeline step.
            If ``None``, use `source_dir` and `source_prefix`.
            By default, ``None``.
        source_dir : str, optional
            Directory to extract source files from (must be paired with
            `source_prefix`). By default, ``None``.
        source_prefix : str, optional
            File prefix to search for in source directory (must be
            paired with `source_dir`). By default, ``None``.
        source_pattern : str, optional
            Optional unix-style ``/filepath/pattern*.h5`` to specify the
            source files. This takes priority over `source_dir` and
            `source_prefix` but is not used if `source_files` are
            specified explicitly. By default, ``None``.
        dsets : list | tuple, optional
            List of datasets to collect. By default, ``('cf_mean',)``.
        pass_through_dsets : list | tuple, optional
            Optional list of datasets that are identical in the
            multi-year files (e.g. input datasets that don't vary from
            year to year) that should be copied to the output multi-year
            file once without a year suffix or means/stdev calculation.
            By default, ``None``.
        """
        self._name = name
        self._dirout = out_dir
        self._source_files = source_files
        self._source_dir = source_dir
        self._source_prefix = source_prefix
        self._source_pattern = source_pattern

        self._pass_through_dsets = None
        if pass_through_dsets is not None:
            self._pass_through_dsets = SAMOutputRequest(pass_through_dsets)

        self._dsets = self._parse_dsets(dsets)

    def _parse_dsets(self, dsets):
        """Parse a multi-year dataset collection request. Can handle PIPELINE
        argument which will find all datasets from one of the files being
        collected ignoring meta, time index, and pass_through_dsets

        Parameters
        ----------
        dsets : str | list
            One or more datasets to collect, or "PIPELINE"

        Returns
        -------
        dsets : SAMOutputRequest
            Dataset list object.
        """
        if isinstance(dsets, str) and dsets == 'PIPELINE':
            files = parse_previous_status(self._dirout, ModuleName.MULTI_YEAR)
            with Resource(files[0]) as res:
                # BUG FIX: pass_through_dsets may be None (its default),
                # which made `d not in self.pass_through_dsets` raise
                # TypeError; fall back to an empty tuple.
                dsets = [d for d in res
                         if not d.startswith('time_index')
                         and d != 'meta'
                         and d not in (self.pass_through_dsets or ())]

        dsets = SAMOutputRequest(dsets)

        return dsets

    @property
    def name(self):
        """
        Returns
        -------
        name : str
            Group name
        """
        name = self._name if self._name.lower() != "none" else None
        return name

    @property
    def source_files(self):
        """
        Returns
        -------
        source_files : list
            list of source files to collect from
        """
        if self._source_files is not None:
            if isinstance(self._source_files, (list, tuple)):
                source_files = self._source_files
            elif self._source_files == "PIPELINE":
                source_files = parse_previous_status(self._dirout,
                                                     ModuleName.MULTI_YEAR)
            else:
                e = "source_files must be a list, tuple, or 'PIPELINE'"
                logger.error(e)
                raise ConfigError(e)

        elif self._source_pattern:
            source_files = glob.glob(self._source_pattern)
            if not all(fp.endswith('.h5') for fp in source_files):
                msg = ('Source pattern resulted in non-h5 files that cannot '
                       'be collected: {}, pattern: {}'
                       .format(source_files, self._source_pattern))
                logger.error(msg)
                raise RuntimeError(msg)

        elif self._source_dir and self._source_prefix:
            source_files = []
            for file in os.listdir(self._source_dir):
                if (file.startswith(self._source_prefix)
                        and file.endswith('.h5') and '_node' not in file):
                    source_files.append(os.path.join(self._source_dir,
                                                     file))
        else:
            e = ("source_files or both source_dir and "
                 "source_prefix must be provided")
            logger.error(e)
            raise ConfigError(e)

        if not any(source_files):
            e = ('Could not find any source files for '
                 'multi-year collection group: "{}" in "{}"'
                 .format(self.name, self._source_dir))
            logger.error(e)
            raise FileNotFoundError(e)

        return source_files

    @property
    def dsets(self):
        """
        Returns
        -------
        _dsets :list | tuple
            Datasets to collect
        """
        return self._dsets

    @property
    def pass_through_dsets(self):
        """Optional list of datasets that are identical in the multi-year
        files (e.g. input datasets that don't vary from year to year) that
        should be copied to the output multi-year file once without a
        year suffix or means/stdev calculation

        Returns
        -------
        list | tuple | None
        """
        return self._pass_through_dsets

    def _dict_rep(self):
        """Get a dictionary representation of this multi year collection group

        Returns
        -------
        dict
        """
        props = get_class_properties(self.__class__)
        out = {k: getattr(self, k) for k in props}
        out['group'] = self.name
        return out

    @classmethod
    def _factory(cls, out_dir, groups_dict):
        """
        Generate dictionary of MultiYearGroup objects for all groups in groups

        Parameters
        ----------
        out_dir : str
            Output directory, used for Pipeline handling
        groups_dict : dict
            Dictionary of group parameters, parsed from multi-year config file

        Returns
        -------
        groups : dict
            Dictionary of MultiYearGroup objects for each group in groups
        """
        groups = {}
        for name, kwargs in groups_dict.items():
            groups[name] = cls(name, out_dir, **kwargs)

        return groups
class MultiYear(Outputs):
"""
Class to handle multiple years of data and:
- collect datasets from multiple years
- compute multi-year means
- compute multi-year standard deviations
- compute multi-year coefficient of variations
"""
    def __init__(self, h5_file, group=None, unscale=True, mode='r',
                 str_decode=True):
        """
        Parameters
        ----------
        h5_file : str
            Path to .h5 resource file
        group : str
            Group to collect datasets into
        unscale : bool
            Boolean flag to automatically unscale variables on extraction
        mode : str
            Mode to instantiate h5py.File instance
        str_decode : bool
            Boolean flag to decode the bytestring meta data into normal
            strings. Setting this to False will speed up the meta data read.
        """
        # record package versions in the log for reproducibility
        log_versions(logger)
        super().__init__(h5_file, group=group, unscale=unscale, mode=mode,
                         str_decode=str_decode)
@staticmethod
def _create_dset_name(source_h5, dset):
"""
Create output dataset name by parsing year from source_h5 and
appending to source dataset name.
Parameters
----------
source_h5 : str
Path to source .h5 file to copy data from
dset : str
Dataset to copy
Returns
-------
dset_out : str
Ouput dataset name
"""
f_name = os.path.basename(source_h5)
year = parse_year(f_name)
dset_out = "{}-{}".format(dset, year)
return dset_out
def _copy_time_index(self, source_h5):
"""
Copy time_index from source_h5 to time_index-{year} in multiyear .h5
Parameters
----------
source_h5 : str
Path to source .h5 file to copy data from
"""
dset_out = self._create_dset_name(source_h5, 'time_index')
if dset_out not in self.datasets:
logger.debug("- Collecting time_index from {}"
.format(os.path.basename(source_h5)))
with Outputs(source_h5, mode='r') as f_in:
time_index = f_in.h5['time_index'][...]
self._create_dset(dset_out, time_index.shape, time_index.dtype,
data=time_index)
def _copy_dset(self, source_h5, dset, meta=None, pass_through=False):
"""
Copy dset_in from source_h5 to multiyear .h5
Parameters
----------
source_h5 : str
Path to source .h5 file to copy data from
dset : str
Dataset to copy
meta : pandas.DataFrame
If provided confirm that source meta matches given meta
pass_through : bool
Flag to just pass through dataset without name modifications
(no differences between years, no means or stdevs)
"""
if pass_through:
dset_out = dset
else:
dset_out = self._create_dset_name(source_h5, dset)
if dset_out not in self.datasets:
logger.debug("- Collecting {} from {}"
.format(dset, os.path.basename(source_h5)))
with Outputs(source_h5, unscale=False, mode='r') as f_in:
if meta is not None:
cols = get_lat_lon_cols(meta)
source_meta = f_in.meta
if len(meta) != len(source_meta):
msg = ('Meta data has different lengths between '
'collection files! Found {} and {}'
.format(len(meta), len(source_meta)))
logger.error(msg)
raise HandlerRuntimeError(msg)
if not np.allclose(meta[cols], source_meta[cols]):
msg = ('Coordinates do not match between '
'collection files!')
logger.warning(msg)
warn(msg)
_, ds_dtype, ds_chunks = f_in.get_dset_properties(dset)
ds_attrs = f_in.get_attrs(dset=dset)
ds_data = f_in[dset]
self._create_dset(dset_out, ds_data.shape, ds_dtype,
chunks=ds_chunks, attrs=ds_attrs, data=ds_data)
@staticmethod
def parse_source_files_pattern(source_files):
"""Parse a source_files pattern that can be either an explicit list of
source files or a unix-style /filepath/pattern*.h5 and either way
return a list of explicit filepaths.
Parameters
----------
source_files : list | str
List of .h5 files to collect datasets from. This can also be a
unix-style /filepath/pattern*.h5 to find .h5 files to collect,
however all resulting files must be .h5 otherwise an exception will
be raised. NOTE: .h5 file names must indicate the year the data
pertains to
Returns
-------
source_files : list
List of .h5 filepaths.
"""
if isinstance(source_files, str) and '*' in source_files:
source_files = glob.glob(source_files)
elif isinstance(source_files, str):
source_files = [source_files]
elif not isinstance(source_files, (list, tuple)):
msg = ('Cannot recognize source_files type: {} {}'
.format(source_files, type(source_files)))
logger.error(msg)
raise TypeError(msg)
if not all(fp.endswith('.h5') for fp in source_files):
msg = ('Non-h5 files cannot be collected: {}'.format(source_files))
logger.error(msg)
raise RuntimeError(msg)
return source_files
def collect(self, source_files, dset, profiles=False, pass_through=False):
"""
Collect dataset dset from given list of h5 files
Parameters
----------
source_files : list | str
List of .h5 files to collect datasets from. This can also be a
unix-style /filepath/pattern*.h5 to find .h5 files to collect,
however all resulting files must be .h5 otherwise an exception will
be raised. NOTE: .h5 file names must indicate the year the data
pertains to
dset : str
Dataset to collect
profiles : bool
Boolean flag to indicate if profiles are being collected
If True also collect time_index
pass_through : bool
Flag to just pass through dataset without name modifications
(no differences between years, no means or stdevs)
"""
source_files = self.parse_source_files_pattern(source_files)
with Outputs(source_files[0], mode='r') as f_in:
meta = f_in.h5['meta'][...]
if 'meta' not in self.datasets:
logger.debug("Copying meta")
self._create_dset('meta', meta.shape, meta.dtype,
data=meta)
meta = pd.DataFrame(meta)
for year_h5 in source_files:
if profiles:
self._copy_time_index(year_h5)
self._copy_dset(year_h5, dset, meta=meta,
pass_through=pass_through)
def _get_source_dsets(self, dset_out):
"""
Extract all available annual datasets associated with dset
Parameters
----------
dset_out : str
Output dataset to find source datasets for
Returns
-------
source_dsets : list
List of annual datasets
"""
dset = os.path.basename(dset_out).split("-")[0]
logger.debug('-- source_dset root = {}'.format(dset))
my_dset = ["{}-{}".format(dset, val) for val in ['means', 'stdev']]
source_dsets = [ds for ds in self.datasets if dset in ds
and ds not in my_dset]
if dset_out in source_dsets:
source_dsets.remove(dset_out)
return source_dsets
def _update_dset(self, dset_out, dset_data):
"""
Update dataset, create if needed
Parameters
----------
dset_out : str
Dataset name
dset_data : ndarray
Dataset data to write to disc
"""
if dset_out in self.datasets:
logger.debug("- Updating {}".format(dset_out))
self[dset_out] = dset_data
else:
logger.debug("- Creating {}".format(dset_out))
source_dset = self._get_source_dsets(dset_out)[0]
_, ds_dtype, ds_chunks = self.get_dset_properties(source_dset)
ds_attrs = self.get_attrs(dset=source_dset)
self._add_dset(dset_out, dset_data, ds_dtype,
chunks=ds_chunks, attrs=ds_attrs)
def _compute_means(self, dset_out):
"""
Compute multi-year means for given dataset
Parameters
----------
dset_out : str
Multi-year means dataset name
Returns
-------
my_means : ndarray
Array of multi-year means
"""
source_dsets = self._get_source_dsets(dset_out)
logger.debug('\t- Computing {} from {}'.format(dset_out, source_dsets))
my_means = np.zeros(len(self), dtype='float32')
for ds in source_dsets:
if self.h5[ds].shape == my_means.shape:
my_means += self[ds]
else:
raise HandlerRuntimeError("{} shape {} should be {}"
.format(ds, self.h5[ds].shape,
my_means.shape))
my_means /= len(source_dsets)
self._update_dset(dset_out, my_means)
return my_means
def means(self, dset):
"""
Extract or compute multi-year means for given source dset
Parameters
----------
dset : str
Dataset of interest
Returns
-------
my_means : ndarray
Array of multi-year means for dataset of interest
"""
my_dset = "{}-means".format(dset)
if my_dset in self.datasets:
my_means = self[my_dset]
else:
my_means = self._compute_means(my_dset)
return my_means
def _compute_stdev(self, dset_out, means=None):
"""
Compute multi-year standard deviation for given dataset
Parameters
----------
dset_out : str
Multi-year stdev dataset name
means : ndarray
Array of pre-computed means
Returns
-------
my_stdev : ndarray
Array of multi-year standard deviations
"""
if means is None:
means = self._compute_means("{}-means".format(dset_out))
source_dsets = self._get_source_dsets(dset_out)
my_stdev = np.zeros(means.shape, dtype='float32')
for ds in source_dsets:
if self.h5[ds].shape == my_stdev.shape:
my_stdev += (self[ds] - means)**2
else:
raise HandlerRuntimeError("{} shape {} should be {}"
.format(ds, self.h5[ds].shape,
my_stdev.shape))
my_stdev = np.sqrt(my_stdev / len(source_dsets))
self._update_dset(dset_out, my_stdev)
return my_stdev
def stdev(self, dset):
    """Fetch the multi-year standard deviation for *dset*, computing it
    if it is not already stored.

    Parameters
    ----------
    dset : str
        Source dataset of interest.

    Returns
    -------
    ndarray
        Multi-year standard deviation for the dataset of interest.
    """
    stdev_dset = "{}-stdev".format(dset)
    if stdev_dset not in self.datasets:
        # Reuse (or trigger) the means computation before taking the stdev.
        return self._compute_stdev(stdev_dset, means=self.means(dset))
    return self[stdev_dset]
def CV(self, dset):
    """Multi-year coefficient of variation (stdev / mean) for *dset*.

    Parameters
    ----------
    dset : str
        Source dataset of interest.

    Returns
    -------
    ndarray
        Multi-year coefficient of variation per site.
    """
    return self.stdev(dset) / self.means(dset)
@classmethod
def is_profile(cls, source_files, dset):
    """Check whether *dset* in the first source file is a 2D profile.

    Parameters
    ----------
    source_files : list | str
        List of .h5 files (or a unix-style /filepath/pattern*.h5) to
        collect datasets from. NOTE: .h5 file names must indicate the
        year the data pertains to.
    dset : str
        Dataset to inspect.

    Returns
    -------
    bool
        True if the dataset is 2D (a profile), False otherwise.
    """
    first_file = cls.parse_source_files_pattern(source_files)[0]
    with Outputs(first_file) as out:
        if dset not in out.datasets:
            raise KeyError('Dataset "{}" not found in source file: "{}"'
                           .format(dset, first_file))
        shape = out.get_dset_properties(dset)[0]
    # Profiles are (time, site) arrays; means are 1D per-site arrays.
    return len(shape) == 2
@classmethod
def pass_through(cls, my_file, source_files, dset, group=None):
    """Copy a dataset that is identical in all source files straight into
    the multi-year output file under the same name.

    Parameters
    ----------
    my_file : str
        Path to the multi-year .h5 file.
    source_files : list | str
        List of .h5 files (or a unix-style /filepath/pattern*.h5) to
        collect datasets from. NOTE: .h5 file names must indicate the
        year the data pertains to.
    dset : str
        Dataset to pass through (also the output dataset name).
    group : str
        Group to collect datasets into.
    """
    logger.info('Passing through {} into {}.'.format(dset, my_file))
    file_list = cls.parse_source_files_pattern(source_files)
    with cls(my_file, mode='a', group=group) as handler:
        handler.collect(file_list, dset, pass_through=True)
@classmethod
def collect_means(cls, my_file, source_files, dset, group=None):
    """Collect a dataset and compute its multi-year means and stdev.

    Parameters
    ----------
    my_file : str
        Path to the multi-year .h5 file.
    source_files : list | str
        List of .h5 files (or a unix-style /filepath/pattern*.h5) to
        collect datasets from. NOTE: .h5 file names must indicate the
        year the data pertains to.
    dset : str
        Dataset to collect.
    group : str
        Group to collect datasets into.
    """
    logger.info('Collecting {} into {} '
                'and computing multi-year means and standard deviations.'
                .format(dset, my_file))
    file_list = cls.parse_source_files_pattern(source_files)
    with cls(my_file, mode='a', group=group) as handler:
        handler.collect(file_list, dset)
        # Feed the freshly computed means into the stdev computation so
        # the source datasets are only scanned twice, not three times.
        yearly_means = handler._compute_means("{}-means".format(dset))
        handler._compute_stdev("{}-stdev".format(dset), means=yearly_means)
@classmethod
def collect_profiles(cls, my_file, source_files, dset, group=None):
    """Collect the multi-year profiles associated with a dataset.

    Parameters
    ----------
    my_file : str
        Path to the multi-year .h5 file.
    source_files : list | str
        List of .h5 files (or a unix-style /filepath/pattern*.h5) to
        collect datasets from. NOTE: .h5 file names must indicate the
        year the data pertains to.
    dset : str
        Profiles dataset to collect.
    group : str
        Group to collect datasets into.
    """
    logger.info('Collecting {} into {}'.format(dset, my_file))
    file_list = cls.parse_source_files_pattern(source_files)
    with cls(my_file, mode='a', group=group) as handler:
        handler.collect(file_list, dset, profiles=True)
def my_collect_groups(out_fpath, groups):
    """Collect all groups into a single multi-year HDF5 file.
    ``reV`` multi-year combines ``reV`` generation data from multiple
    years (typically stored in separate files) into a single multi-year
    file. Each dataset in the multi-year file is labeled with the
    corresponding years, and multi-year averages of the yearly datasets
    are also computed.
    Parameters
    ----------
    out_fpath : str
        Path to multi-year HDF5 file to use for multi-year
        collection.
    groups : dict
        Dictionary of collection groups and their parameters. This
        should be a dictionary mapping group names (keys) to a set
        of key word arguments (values) that can be used to initialize
        :class:`~reV.handlers.multi_year.MultiYearGroup` (excluding the
        required ``name`` and ``out_dir`` inputs, which are populated
        automatically). For example::
            groups = {
                "none": {
                    "dsets": [
                        "cf_profile",
                        "cf_mean",
                        "ghi_mean",
                        "lcoe_fcr",
                    ],
                    "source_dir": "./",
                    "source_prefix": "",
                    "pass_through_dsets": [
                        "capital_cost",
                        "fixed_operating_cost",
                        "system_capacity",
                        "fixed_charge_rate",
                        "variable_operating_cost",
                    ]
                },
                "solar_group": {
                    "source_files": "PIPELINE",
                    "dsets": [
                        "cf_profile_ac",
                        "cf_mean_ac",
                        "ac",
                        "dc",
                        "clipped_power"
                    ],
                    "pass_through_dsets": [
                        "system_capacity_ac",
                        "dc_ac_ratio"
                    ]
                },
                ...
            }
        The group names will be used as the HDF5 file group name under
        which the collected data will be stored. You can have exactly
        one group with the name ``"none"`` for a "no group" collection
        (this is typically what you want and all you need to specify).

    Returns
    -------
    str
        Path to the multi-year output file (with ``.h5`` appended if it
        was missing from the input).
    """
    # Normalize the output path so it always carries the .h5 extension.
    if not out_fpath.endswith(".h5"):
        out_fpath = '{}.h5'.format(out_fpath)
    out_dir = os.path.dirname(out_fpath)
    # Expand the user-supplied kwargs into MultiYearGroup objects, then
    # flatten each group into a plain parameter dict for iteration.
    groups = MultiYearGroup._factory(out_dir, groups)
    group_params = {name: group._dict_rep()
                    for name, group in groups.items()}
    logger.info('Multi-year collection is being run with output path: {}'
                .format(out_fpath))
    ts = time.time()
    for group_name, group in group_params.items():
        logger.info('- Collecting datasets "{}" from "{}" into "{}/"'
                    .format(group['dsets'], group['source_files'],
                            group_name))
        t0 = time.time()
        for dset in group['dsets']:
            # 2D datasets are collected as profiles; 1D datasets get
            # multi-year means and standard deviations computed.
            if MultiYear.is_profile(group['source_files'], dset):
                MultiYear.collect_profiles(out_fpath, group['source_files'],
                                           dset, group=group['group'])
            else:
                MultiYear.collect_means(out_fpath, group['source_files'],
                                        dset, group=group['group'])
        # Pass-through datasets are copied verbatim (no statistics).
        if group.get('pass_through_dsets', None) is not None:
            for dset in group['pass_through_dsets']:
                MultiYear.pass_through(out_fpath, group['source_files'],
                                       dset, group=group['group'])
        runtime = (time.time() - t0) / 60
        logger.info('- {} collection completed in: {:.2f} min.'
                    .format(group_name, runtime))
    runtime = (time.time() - ts) / 60
    logger.info('Multi-year collection completed in : {:.2f} min.'
                .format(runtime))
    return out_fpath
/Muntjac-1.1.2.tar.gz/Muntjac-1.1.2/muntjac/ui/alignment_utils.py | from muntjac.ui.alignment import Alignment
from muntjac.ui.layout import IAlignmentHandler
class AlignmentUtils(object):
    """Helper class for setting alignments using a short notation.

    Supported notation is:

      - t, top for top alignment
      - m, middle for vertical center alignment
      - b, bottom for bottom alignment
      - l, left for left alignment
      - c, center for horizontal center alignment
      - r, right for right alignment

    @deprecated: replaced by L{Alignment}.
    """

    # Bit mask isolating the horizontal component of a combined alignment.
    _horizontalMask = (IAlignmentHandler.ALIGNMENT_LEFT
                       | IAlignmentHandler.ALIGNMENT_HORIZONTAL_CENTER
                       | IAlignmentHandler.ALIGNMENT_RIGHT)

    # Bit mask isolating the vertical component of a combined alignment.
    _verticalMask = (IAlignmentHandler.ALIGNMENT_TOP
                     | IAlignmentHandler.ALIGNMENT_VERTICAL_CENTER
                     | IAlignmentHandler.ALIGNMENT_BOTTOM)

    # Token registry mapping strings like "t" or "top" to alignment bit
    # masks; populated by the addMapping() calls at module load time.
    _alignmentStrings = dict()

    @classmethod
    def addMapping(cls, alignment, *values):
        """Register one or more string tokens for the given alignment mask.

        @param alignment: alignment bit mask the tokens resolve to
        @param values: string tokens (e.g. 'r', 'right')
        """
        for s in values:
            cls._alignmentStrings[s] = alignment

    @classmethod
    def setComponentAlignment(cls, parent, component, alignment):
        """Set the alignment for the component using short notation.

        @param parent: layout containing the component
        @param component: component whose alignment is updated
        @param alignment:
                   String containing one or two alignment strings. If short
                   notation "r", "t", etc is used valid strings include
                   "r", "rt", "tr", "t". If the longer notation is used the
                   alignments should be separated by a space e.g.
                   "right", "right top", "top right", "top". It is valid to
                   mix short and long notation but they must be separated by a
                   space e.g. "r top".
        @raise ValueError:
        """
        # BUG FIX: the original used Python-2-only ``raise ValueError, msg``
        # syntax, a SyntaxError on Python 3. ``raise ValueError(msg)`` is
        # valid on both Python 2 and 3.
        if alignment is None or len(alignment) == 0:
            raise ValueError('alignment for setComponentAlignment() '
                             'cannot be null or empty')

        currentAlignment = parent.getComponentAlignment(
            component).getBitMask()

        if len(alignment) == 1:
            # Use short form "t","l",...
            currentAlignment = cls.parseAlignment(alignment[:1],
                                                  currentAlignment)
        elif len(alignment) == 2:
            # Use short form "tr","lb",...
            currentAlignment = cls.parseAlignment(alignment[:1],
                                                  currentAlignment)
            currentAlignment = cls.parseAlignment(alignment[1:2],
                                                  currentAlignment)
        else:
            # Alignments are separated by space
            strings = alignment.split(' ')
            if len(strings) > 2:
                raise ValueError('alignment for setComponentAlignment() '
                                 'should not contain more than 2 alignments')

            for alignmentString in strings:
                currentAlignment = cls.parseAlignment(alignmentString,
                                                      currentAlignment)

        horizontalAlignment = currentAlignment & cls._horizontalMask
        verticalAlignment = currentAlignment & cls._verticalMask
        parent.setComponentAlignment(component,
                Alignment(horizontalAlignment + verticalAlignment))

    @classmethod
    def parseAlignment(cls, alignmentString, alignment):
        """Parse alignmentString which contains one alignment (horizontal
        or vertical) and return an updated version of the passed alignment
        where the alignment in one direction has been changed. If the passed
        alignmentString is unknown an exception is thrown.

        @raise ValueError:
        """
        # Lookup is case-insensitive: tokens are registered in lower case.
        parsed = cls._alignmentStrings.get(alignmentString.lower())
        if parsed is None:
            raise ValueError('Could not parse alignment string \''
                             + alignmentString + '\'')

        if parsed & cls._horizontalMask != 0:
            # Keep the vertical alignment from the current alignment and
            # replace the horizontal part with the parsed value.
            vertical = alignment & cls._verticalMask
            alignment = vertical | parsed
        else:
            # Keep the horizontal alignment from the current alignment and
            # replace the vertical part with the parsed value.
            horizontal = alignment & cls._horizontalMask
            alignment = horizontal | parsed

        return alignment
# Populate the token registry: each alignment constant is reachable via its
# single-letter shorthand and its full English name. Lookups are
# case-insensitive because parseAlignment() lower-cases its input first.
AlignmentUtils.addMapping(IAlignmentHandler.ALIGNMENT_TOP, 't', 'top')
AlignmentUtils.addMapping(IAlignmentHandler.ALIGNMENT_BOTTOM, 'b', 'bottom')
AlignmentUtils.addMapping(IAlignmentHandler.ALIGNMENT_VERTICAL_CENTER, 'm', 'middle')
AlignmentUtils.addMapping(IAlignmentHandler.ALIGNMENT_LEFT, 'l', 'left')
AlignmentUtils.addMapping(IAlignmentHandler.ALIGNMENT_RIGHT, 'r', 'right')
AlignmentUtils.addMapping(IAlignmentHandler.ALIGNMENT_HORIZONTAL_CENTER, 'c', 'center')
/DiffCapAnalyzer-0.1.1.tar.gz/DiffCapAnalyzer-0.1.1/diffcapanalyzer/databasewrappers.py | import io
import os
import pandas as pd
from pandas import ExcelWriter
import pandas.io.sql as pd_sql
import sqlite3 as sql
import scipy
import numpy as np
from diffcapanalyzer.chachifuncs import load_sep_cycles
from diffcapanalyzer.chachifuncs import get_clean_cycles
from diffcapanalyzer.chachifuncs import get_clean_sets
from diffcapanalyzer.chachifuncs import calc_dq_dqdv
from diffcapanalyzer.descriptors import dfsortpeakvals
from diffcapanalyzer.databasefuncs import init_master_table
from diffcapanalyzer.databasefuncs import update_database_newtable
from diffcapanalyzer.databasefuncs import update_master_table
from diffcapanalyzer.databasefuncs import get_file_from_database
def process_data(file_name, database_name, decoded_dataframe,
                 datatype, windowlength=9,
                 polyorder=3):
    """Run the full cleaning pipeline for one raw cycling data file.

    Separates cycles, cleans each cycle (Savitzky-Golay smoothing with the
    given ``windowlength``/``polyorder``), stores per-cycle results in the
    database, and saves the recombined cleaned set. If the file has already
    been processed (its CleanSet table exists), this is a no-op.

    Parameters
    ----------
    file_name : str
        Path or name of the uploaded raw data file.
    database_name : str
        Path to the SQLite database file (created if missing).
    decoded_dataframe : pandas.DataFrame
        Raw data decoded from the uploaded file.
    datatype : str
        Instrument data format identifier passed through to the cleaning
        helpers.
    windowlength : int
        Smoothing filter window length.
    polyorder : int
        Smoothing filter polynomial order.
    """
    # Lazily create the database with its master table on first use.
    if not os.path.exists(database_name):
        init_master_table(database_name)
    names_list = get_table_names(database_name)
    core_file_name = get_filename_pref(file_name)
    # The CleanSet table marks a fully processed file — skip reprocessing.
    if core_file_name + 'CleanSet' in names_list:
        return
    else:
        # Register the raw data and its table-name prefixes first, then run
        # the separate -> clean -> recombine pipeline.
        parse_update_master(core_file_name, database_name,
                            datatype, decoded_dataframe)
        cycle_dict = load_sep_cycles(core_file_name,
                                     database_name,
                                     datatype)
        clean_cycle_dict = get_clean_cycles(cycle_dict,
                                            core_file_name,
                                            database_name,
                                            datatype,
                                            windowlength,
                                            polyorder)
        clean_set_df = get_clean_sets(clean_cycle_dict,
                                      core_file_name,
                                      database_name)
    return
def parse_update_master(
        core_file_name,
        database_name,
        datatype,
        decoded_dataframe):
    """Store the raw data (original and with dQ/dV) and register the file.

    Computes dQ/dV from the raw dataframe, uploads both the unaltered and
    the dQ/dV-augmented tables to the database, and records in the master
    table the name prefixes used to access every table derived from this
    file.

    Parameters
    ----------
    core_file_name : str
        File-name prefix (no path, no extension) identifying this dataset.
    database_name : str
        Path to the SQLite database file.
    datatype : str
        Instrument data format identifier used by the dQ/dV calculation.
    decoded_dataframe : pandas.DataFrame
        Raw data decoded from the uploaded file.
    """
    update_database_newtable(decoded_dataframe,
                             core_file_name + 'UnalteredRaw',
                             database_name)
    # Augment the raw data with differential-capacity columns.
    data = calc_dq_dqdv(decoded_dataframe, datatype)
    update_database_newtable(data, core_file_name + 'Raw',
                             database_name)
    # Table-name prefixes for every artifact the pipeline will produce;
    # downstream code reads these from the master table.
    update_dict = {'Dataset_Name': core_file_name,
                   'Raw_Data_Prefix': core_file_name + 'Raw',
                   'Cleaned_Data_Prefix': core_file_name + 'CleanSet',
                   'Cleaned_Cycles_Prefix': core_file_name + '-CleanCycle',
                   'Descriptors_Prefix': core_file_name + '-descriptors',
                   'Model_Parameters_Prefix': core_file_name + 'ModParams',
                   'Model_Points_Prefix': core_file_name + '-ModPoints',
                   'Raw_Cycle_Prefix': core_file_name + '-Cycle',
                   'Original_Data_Prefix': core_file_name + 'UnalteredRaw'}
    update_master_table(update_dict, database_name)
    return
def macc_chardis(row):
    """Map a cycling-state row to a sign: -1 for a discharging row
    (``row['Md'] == 'D'``), +1 for anything else (charging)."""
    return -1 if row['Md'] == 'D' else 1
def if_file_exists_in_db(database_name, file_name):
    """Report whether *file_name* has already been fully processed into
    *database_name*, judged by the presence of its CleanSet table."""
    if not os.path.exists(database_name):
        return False
    clean_table = get_filename_pref(file_name) + 'CleanSet'
    return clean_table in get_table_names(database_name)
def get_db_filenames(database_name):
    """Return the dataset names registered in the master table that have
    finished processing (their CleanSet table exists). Used to populate
    the dropdown menu of available datasets."""
    connection = sql.connect(database_name)
    cursor = connection.cursor()
    dataset_names = [row[0] for row in
                     cursor.execute("""SELECT Dataset_Name FROM master_table""")]
    connection.close()
    # Keep only datasets whose cleaning pipeline has completed.
    return [name for name in dataset_names
            if if_file_exists_in_db(database_name, name)]
def get_filename_pref(file_name):
    """Strip any directory components ('/' or '\\') and everything from the
    first '.' onward, returning the bare file-name prefix used to label
    this file's database tables."""
    tail = file_name.replace('\\', '/').rsplit('/', 1)[-1]
    return tail.partition('.')[0]
def get_table_names(database):
    """List every table name present in the SQLite *database* file.

    Returns None when the database file does not exist (preserving the
    original implicit-return behavior).
    """
    if not os.path.exists(database):
        return None
    connection = sql.connect(database)
    cursor = connection.cursor()
    query = """SELECT name FROM sqlite_master WHERE type='table'"""
    table_names = [row[0] for row in cursor.execute(query)]
    connection.close()
    return table_names
/Firefly_III_API_Client-2.0.5.0-py3-none-any.whl/firefly_iii_client/paths/v1_webhooks_id_messages_message_id/get.py | from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from firefly_iii_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from firefly_iii_client import schemas # noqa: F401
from firefly_iii_client.model.unauthenticated import Unauthenticated
from firefly_iii_client.model.bad_request import BadRequest
from firefly_iii_client.model.internal_exception import InternalException
from firefly_iii_client.model.webhook_message_single import WebhookMessageSingle
from firefly_iii_client.model.not_found import NotFound
from . import path
# Header params
XTraceIdSchema = schemas.UUIDSchema
RequestRequiredHeaderParams = typing_extensions.TypedDict(
'RequestRequiredHeaderParams',
{
}
)
RequestOptionalHeaderParams = typing_extensions.TypedDict(
'RequestOptionalHeaderParams',
{
'X-Trace-Id': typing.Union[XTraceIdSchema, str, uuid.UUID, ],
},
total=False
)
class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams):
pass
request_header_x_trace_id = api_client.HeaderParameter(
name="X-Trace-Id",
style=api_client.ParameterStyle.SIMPLE,
schema=XTraceIdSchema,
)
# Path params
# URL template placeholders: {id} is the webhook id (string), {messageId}
# is the numeric id of the message within that webhook.
IdSchema = schemas.StrSchema
MessageIdSchema = schemas.IntSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
    'RequestRequiredPathParams',
    {
        'id': typing.Union[IdSchema, str, ],
        'messageId': typing.Union[MessageIdSchema, decimal.Decimal, int, ],
    }
)
RequestOptionalPathParams = typing_extensions.TypedDict(
    'RequestOptionalPathParams',
    {
    },
    total=False
)
# Combined typed dict: both path parameters are required.
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
    pass
request_path_id = api_client.PathParameter(
    name="id",
    style=api_client.ParameterStyle.SIMPLE,
    schema=IdSchema,
    required=True,
)
request_path_message_id = api_client.PathParameter(
    name="messageId",
    style=api_client.ParameterStyle.SIMPLE,
    schema=MessageIdSchema,
    required=True,
)
# Security scheme(s) this endpoint requires, as named in the OpenAPI spec.
_auth = [
    'firefly_iii_auth',
]
# 200: success — the requested webhook message as a JSON:API document.
SchemaFor200ResponseBodyApplicationVndApijson = WebhookMessageSingle
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor200ResponseBodyApplicationVndApijson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor200,
    content={
        'application/vnd.api+json': api_client.MediaType(
            schema=SchemaFor200ResponseBodyApplicationVndApijson),
    },
)
# 400: the request was malformed (bad path parameters, etc.).
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor400ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor400,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor400ResponseBodyApplicationJson),
    },
)
# 401: the request lacked valid authentication credentials.
SchemaFor401ResponseBodyApplicationJson = Unauthenticated
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor401ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor401,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor401ResponseBodyApplicationJson),
    },
)
# 404: no webhook/message pair matches the supplied path parameters.
SchemaFor404ResponseBodyApplicationJson = NotFound
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor404ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_404 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor404,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor404ResponseBodyApplicationJson),
    },
)
# 500: the server hit an internal error while handling the request.
SchemaFor500ResponseBodyApplicationJson = InternalException
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor500ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor500,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor500ResponseBodyApplicationJson),
    },
)
# Dispatch table: HTTP status code (as a string) -> response deserializer.
_status_code_to_response = {
    '200': _response_for_200,
    '400': _response_for_400,
    '401': _response_for_401,
    '404': _response_for_404,
    '500': _response_for_500,
}
# Media types this endpoint can produce; offered via the Accept header.
_all_accept_content_types = (
    'application/vnd.api+json',
    'application/json',
)
class BaseApi(api_client.Api):
    """Shared implementation of GET /v1/webhooks/{id}/messages/{messageId}.

    The three ``@typing.overload`` signatures only refine the static return
    type based on ``skip_deserialization``; the final un-decorated method is
    the single runtime implementation.
    """
    @typing.overload
    def _get_single_webhook_message_oapg(
        self,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def _get_single_webhook_message_oapg(
        self,
        skip_deserialization: typing_extensions.Literal[True],
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def _get_single_webhook_message_oapg(
        self,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def _get_single_webhook_message_oapg(
        self,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """
        Get a single message from a webhook.
        Validates the typed-dict inputs, serializes path and header
        parameters into the URL and request headers, performs the GET call,
        and deserializes the response according to its status code.
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        self._verify_typed_dict_inputs_oapg(RequestHeaderParams, header_params)
        self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
        # Substitute the serialized path parameters into the URL template.
        used_path = path.value
        _path_params = {}
        for parameter in (
            request_path_id,
            request_path_message_id,
        ):
            parameter_data = path_params.get(parameter.name, schemas.unset)
            if parameter_data is schemas.unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _path_params.update(serialized_data)
        for k, v in _path_params.items():
            used_path = used_path.replace('{%s}' % k, v)
        # Serialize header parameters (currently only X-Trace-Id).
        _headers = HTTPHeaderDict()
        for parameter in (
            request_header_x_trace_id,
        ):
            parameter_data = header_params.get(parameter.name, schemas.unset)
            if parameter_data is schemas.unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _headers.extend(serialized_data)
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)
        response = self.api_client.call_api(
            resource_path=used_path,
            method='get'.upper(),
            headers=_headers,
            auth_settings=_auth,
            stream=stream,
            timeout=timeout,
        )
        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Pick the deserializer that matches the HTTP status code;
            # unknown codes fall back to the raw, undeserialized response.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        # Non-2xx responses are surfaced as exceptions carrying the
        # (possibly deserialized) response for inspection by the caller.
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(
                status=response.status,
                reason=response.reason,
                api_response=api_response
            )
        return api_response
class GetSingleWebhookMessage(BaseApi):
    """Public facade exposing the endpoint under its operationId name."""
    # this class is used by api classes that refer to endpoints with operationId fn names
    @typing.overload
    def get_single_webhook_message(
        self,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def get_single_webhook_message(
        self,
        skip_deserialization: typing_extensions.Literal[True],
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def get_single_webhook_message(
        self,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def get_single_webhook_message(
        self,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """Get a single message from a webhook (delegates to BaseApi)."""
        return self._get_single_webhook_message_oapg(
            header_params=header_params,
            path_params=path_params,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
class ApiForget(BaseApi):
    """Public facade exposing the endpoint by path + HTTP verb (``get``)."""
    # this class is used by api classes that refer to endpoints by path and http method names
    @typing.overload
    def get(
        self,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def get(
        self,
        skip_deserialization: typing_extensions.Literal[True],
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def get(
        self,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def get(
        self,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """GET the webhook message (delegates to BaseApi)."""
        return self._get_single_webhook_message_oapg(
            header_params=header_params,
            path_params=path_params,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
/BGISimtool-0.1-py3-none-any.whl/bgisimtool/histogram.py |
# Copyright (c) 2018 Swann Levasseur
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
# stdev in histogram
from numpy import sqrt, histogram
import numpy as np
from math import pow
import logging
# ------------------------------------------------------------------------------
# logger
# ------------------------------------------------------------------------------
histogram_logger = logging.getLogger('bgisimtool.histogram')
# ------------------------------------------------------------------------------
# Class Histogram DO NOT USE, kept for compatibility with old stuff
# ------------------------------------------------------------------------------
class Histogram:
"""A class representing a histogram"""
# ----------------------
# Magic methods
# ----------------------
    def __init__(self, bin_center=None, content=None, bin_width=None):
        """Create a histogram, optionally pre-filled.

        Parameters
        ----------
        bin_center : list, optional
            Bin center positions, one per bin.
        content : list, optional
            Bin counts, one per bin.
        bin_width : list, optional
            Bin widths, one per bin.

        The three lists are only adopted when all are provided and have
        equal length; otherwise the histogram starts empty.
        """
        # Summary statistics; recomputed by updateAll() once bins exist.
        self.mean = 0
        self.mean_err = 0
        self.stdev = 0
        self.stdev_err = 0
        self.integral = 0
        self.min_bin_value = 0
        # used for the iterable feature
        self.index = 0
        # if the user provided all the list and they are all the same size, fill the hist
        if not((bin_center is None) or (content is None) or (bin_width is None)) and (len(bin_center) == len(content) == len(bin_width)):
            self._bin_center = bin_center
            self._bin_width = bin_width
            self._bin_content = content
            self.updateAll()
        else:
            # else all empty
            self._bin_center = []
            self._bin_width = []
            self._bin_content = []
    # return the size of the hist in terms of number of bins
    def __len__(self):
        """Number of bins in the histogram."""
        return len(self._bin_center)
    # to make it act like a list
    def __getitem__(self, index):
        """Return the (center, content, width) tuple of bin *index*."""
        return self._bin_center[index], self._bin_content[index], self._bin_width[index]
    def __setitem__(self, index, data_tupple):
        """Set bin *index* from a (center, content, width) tuple."""
        self._bin_center[index], self._bin_content[index], self._bin_width[index] = data_tupple
def __delitem__(self, index):
del self._bin_center[index]
del self._bin_content[index]
del self._bin_width[index]
def __str__(self):
description = 'Class Histogram\n'
description += 'Bin number : {0:d} \n'.format(len(self._bin_center))
description += 'Mean : {0:.3f} \n'.format(self.mean)
description += 'Mean error : {0:.3f} \n'.format(self.mean_err)
description += 'Stdev : {0:.3f} \n'.format(self.stdev)
description += 'Stdev error : {0:.3f} \n'.format(self.stdev_err)
description += 'Integral : {0:d} \n'.format(self.integral)
description += 'Min bin value : {0:d} \n'.format(self.min_bin_value)
return description
    def __iter__(self):
        """Reset the iteration cursor and return self as the iterator."""
        self.index = 0
        return self
    def __next__(self):
        """Return the next (center, content, width) tuple or raise StopIteration."""
        try:
            result = (self._bin_center[self.index], self._bin_content[self.index], self._bin_width[self.index])
        except IndexError:
            # Cursor ran past the last bin: iteration is complete.
            raise StopIteration
        self.index += 1
        return result
# ----------------------
# manually fill
# ----------------------
def fillFromRawPosition(self, positions, bin_size, limits=None):
""" fillFromRawPosition( [12,13,14...], 55e-6, (-5,14) )
Fill the _bin_center and _bin_content and _bin_width members of a Histogram object using the
positional data provided in position and the bin size provided by bin_size.
limits is a tupple with the range between witch to fill the histogram (it can be set larger or smaller to the actual min, max of the positions).
If no range is provided, the range is automaticaly assigned to the min and max value of positions
! Make sure positons, bin_size and limits are all in the same unit e.g mm !
"""
if limits is None:
limits = ()
# reset to avoid stacking data
self.reset()
# Constants (any function call done more than once for the same value..)
min_pos = min(positions)
max_pos = max(positions)
# check for range to create the right offset
if limits:
# check if the range provided is at least partially overlapping with the positions
if max_pos < limits[0] or min_pos > limits[1]:
print('Limits provided in fillFromRawPosition are not overlapping with the positions')
print('Abort')
return
offset = limits[0]
bin_number = int((limits[1] - limits[0]) / bin_size) + 1
else:
# range not provided, deduce from data
# get offset from min value of positions
offset = min_pos
# Find the good number of bins
bin_number = int((max_pos - min_pos) / bin_size) + 1
# Apply the offset to all positions, this is needed to easily fill the hist afterward
positions_offset = list(map(lambda a: a - offset, positions))
# fill the bin_centers and widths. The content are set to zeros
for x in range(bin_number):
self._bin_center.append((x * bin_size) + (bin_size / 2) + offset)
self._bin_width.append(bin_size)
self._bin_content.append(0)
# Fill the bin_content, this is where positions_offset is used
for val in positions_offset:
index = int(val / bin_size)
if index >= 0:
self._bin_content[index] += 1
#update the histogram data
self.updateAll()
# ----------------------
# add one bin
# ----------------------
def add(self, data_tupple):
self._bin_center.append(data_tupple[0])
self._bin_content.append(data_tupple[1])
self._bin_width.append(data_tupple[2])
# ----------------------
# Fill from file
# ----------------------
def fillFromFile(self, filename):
""" Read the content of a .txt file containing a histogram.
The histogram object is updated after the the file if loaded.
filename is a string.
"""
# reset to avoid stacking data
self.reset()
# Open file
with open(filename, 'r') as file:
# skip the header lines
next(file)
next(file)
# read the data
for line in file:
# load a line and split it in a list of floats
num = list(map(float, line.split(',')))
if len(num) == 3:
self._bin_center.append(num[0])
self._bin_content.append(num[1])
self._bin_width.append(num[2])
elif len(num) == 2:
self._bin_center.append(num[0])
self._bin_content.append(num[1])
self._bin_width.append(1)
else:
raise ValueError
# update with all new stuff
self.updateAll()
# ----------------------
# write 2 file
# ----------------------
def write2File(self, filename):
""" Write the current content of the histogram object into a simple text file
filename is a string
"""
# Open file
with open(filename, 'w') as file:
# write the header line
file.write('Bin center, bin content, bin width')
# read the data
for i in range(len(self._bin_center)):
# write each line
file.write(str(self._bin_center[i]) + ', ' + self._bin_content[i] + ', ' + self._bin_width[i])
# ----------------------
# Histogram modification
# ----------------------
def conditionalBining(self, content_limit=5):
""" Scan the histogram and merge bins in order to have all bins > content_limit.
The scanning is done from the first to the last bin
!This may result in a histogram with a non constant bin width!"""
temp_bin_center = []
temp_bin_content = []
temp_bin_width = []
# you have to initialize it with the first bin..
current_bin_center = 0
current_bin_content = 0
current_bin_width = 0
lower_bin_wall = self._bin_center[0] - (self._bin_width[0] / 2)
for i in range(len(self._bin_center)):
current_bin_center = lower_bin_wall + ((current_bin_width + self._bin_width[i]) / 2)
current_bin_content += self._bin_content[i]
current_bin_width += self._bin_width[i]
# test
if current_bin_content >= content_limit:
temp_bin_center.append(current_bin_center)
temp_bin_content.append(current_bin_content)
temp_bin_width.append(current_bin_width)
# reset
lower_bin_wall += current_bin_width
current_bin_center = 0
current_bin_content = 0
current_bin_width = 0
# add the last bin
temp_bin_center[-1] = (lower_bin_wall - temp_bin_width[-1]) + ((current_bin_width + temp_bin_width[-1]) / 2)
temp_bin_content[-1] += current_bin_content
temp_bin_width[-1] += current_bin_width
# update hist
self._bin_center = temp_bin_center
self._bin_width = temp_bin_width
self._bin_content = temp_bin_content
def reframe(self, limits):
# new hist
bin_center = []
bin_content = []
bin_width = []
# for each existing bin
for i in range(len(self._bin_center)):
# if within the limits, we keep it
if self._bin_center[i] > limits[0] and self._bin_center[i] < limits[1]:
bin_center.append(self._bin_center[i])
bin_content.append(self._bin_content[i])
bin_width.append(self._bin_width[i])
# overwrite the current hist
self._bin_center = bin_center
self._bin_content = bin_content
self._bin_width = bin_width
def rebin(self, bin_factor):
"""Function to rebin the histogram y a certain factor !WARNING WIP! """
# declare temp container to contain the new values of the hist
temp_center_list = []
temp_content_list = []
temp_width_list = []
temp_center = 0
temp_content = 0
temp_width = 0
lower_bin_wall = self._bin_center[0] - (self._bin_width[0] / 2)
# counter
bin_cnt = 0
# for each value in the hist
for i in range(len(self._bin_center)):
temp_content += self._bin_content[i]
temp_width += self._bin_width[i]
temp_center = lower_bin_wall + (temp_width / 2)
bin_cnt += 1
# if the bin is not combined yet
if bin_cnt >= bin_factor:
temp_center_list.append(temp_center)
temp_content_list.append(temp_content)
temp_width_list.append(temp_width)
# reset
lower_bin_wall += temp_width
temp_content = 0
temp_center = 0
temp_width = 0
# reset the cnt
bin_cnt = 0
# replace existing data
self._bin_center = temp_center_list
self._bin_width = temp_width_list
self._bin_content = temp_content_list
def fillBlank(self, bin_number):
"""Fill the histogram object with 'bin_number' bins of content None
This function should be used when a user want to fill a histogram in a custom way and need the memory to e allocated already"""
# reset to make sure we don't stack data
self.reset()
# for all bins requested add an empty bin
for _ in range(bin_number):
self._bin_center.append(None)
self._bin_content.append(None)
self._bin_width.append(None)
def areaNormalize(self):
# make sure we have the integral
self._updateIntegral()
# for each bin
for i in range(len(self._bin_content)):
self._bin_content[i] = self._bin_content[i] / self.integral
def selectiveBinning(self, min_bin_count):
""" Remove every bin with less than 'min_bin_count' from the parent histogram."""
# for each bin
new_center = []
new_content = []
new_width = []
for index in range(len(self._bin_center)):
# if the current bin has more or equal than min_bin_count, keep that bin
if self._bin_content[index] >= min_bin_count:
new_center.append(self._bin_center[index])
new_content.append(self._bin_content[index])
new_width.append(self._bin_width[index])
# overwrite the content
self._bin_center = new_center
self._bin_content = new_content
self._bin_width = new_width
def filterByExpected(self, exp, lim):
""" Remove every bin where the corresponding item in the 'exp' is lower than lim
"""
new_center = []
new_content = []
new_width = []
new_exp = []
# for each position in exp
for i in range(len(exp)):
# if exp is above or equal to lim
if exp[i] >= lim:
new_center.append(self._bin_center[i])
new_content.append(self._bin_content[i])
new_width.append(self._bin_width[i])
new_exp.append(exp[i])
# overwrite the old values
self._bin_center = new_center
self._bin_content = new_content
self._bin_width = new_width
return new_exp
# ----------------------
# Update attributes
# ----------------------
def _updateIntegral(self):
self.integral = sum(self._bin_content)
def _updateMinBinValue(self):
self.min_bin_value = max(self._bin_content)
for val in self._bin_content:
if val < self.min_bin_value:
self.min_bin_value = val
def _updateMean(self):
if self.integral > 0:
S = 0
for i in range(len(self._bin_center)):
S += self._bin_center[i] * self._bin_content[i]
self.mean = S / self.integral
else:
self.mean = 0
def _updateMeanErr(self):
if self.integral > 0:
self.mean_err = self.stdev / sqrt(sum(self._bin_content))
else:
self.mean_err = 0
def _updateStdevErr(self):
if self.integral > 0:
# only valid for gaussian distribution
self.stdev_err = self.stdev * (1 / sqrt(2 * self.integral - 2))
else:
self.stdev_err = 0
def _updateStdev(self):
if self.integral > 0:
S = 0
for i in range(len(self._bin_center)):
S += self._bin_content[i] * pow((self._bin_center[i] - self.mean), 2)
self.stdev = sqrt(S / self.integral)
else:
self.stdev = 0
def updateAll(self):
# we combine mean, integral and min_bin_value in the same loop to gain speed
# get mean, integral, min_bin_value
S = 0
self.min_bin_value = max(self._bin_content)
self.integral = 0
for i in range(len(self._bin_center)):
# mean
S += self._bin_center[i] * self._bin_content[i]
# integral
self.integral += self._bin_content[i]
# min bin content
if self._bin_content[i] < self.min_bin_value:
self.min_bin_value = self._bin_content[i]
# check if intergral is zero, then no other meas
if self.integral > 2:
self.mean = S / self.integral
# Get stdev, mean err, stdeverr
self._updateStdev()
# mean err
self.mean_err = self.stdev / sqrt(sum(self._bin_content))
# stdevErr
self.stdev_err = self.stdev * (1 / sqrt(2 * self.integral - 2))
else:
self.mean = 0
self.mean_err = 0
self.stdev_err = 0
self.stdev = 0
def reset(self):
self._bin_center = []
self._bin_content = []
self._bin_width = []
self.index = 0
self.mean = 0
self.mean_err = 0
self.stdev = 0
self.stdev_err = 0
self.integral = 0
self.min_bin_value = 0
# ------------------------------------------------------------------------------
# Class Histogram
# ------------------------------------------------------------------------------
class HistogramV2:
"""A class representing a histogramV2"""
# ----------------------
# Magic methods
# ----------------------
def __init__(self, positions=None, bin_width=None, bin_count=None, log_level=logging.WARNING):
self.mean = 0
self.mean_err = 0
self.stdev = 0
self.stdev_err = 0
self.integral = 0
self.min_bin_value = 0
self._bin_edge = None
self._bin_center = None
self._bin_content = None
self.logger = logging.getLogger('bgisimtool.histogram.histogramV2')
self.logger.setLevel(log_level)
# if we can fill it, we do it
if positions is not None and (bin_width is not None or bin_count is not None):
self.fill_from_positions(positions, bin_width=bin_width, bin_number=bin_count)
# return the size of the hist in terms of number of bins
def __len__(self):
return len(self._bin_center)
# ----------------------
# manually fill
# ----------------------
def fill_from_positions(self, positions, bin_width=None, bin_number=None, limits=None):
# if no bin width or bin number is given, the default of bin_number = 10 from numpy is used.
if bin_number is None and bin_width is not None:
min_pos = min(positions)
max_pos = max(positions)
if limits:
bin_count = int((limits[1] - limits[0]) / bin_width)
else:
bin_count = int((max_pos - min_pos) / bin_width)
else:
bin_count = bin_number
# get hist,
self._bin_content, self._bin_edge = np.histogram(positions, bins=bin_count, range=limits)
self._bin_center = 0.5 * (self._bin_edge[1:] + self._bin_edge[:-1])
self.update_all()
def fill_by_value(self, bin_content, bin_edge, bin_center):
if len(bin_edge) != len(bin_content + 1):
self.logger.error("Error, Bin_edge should be N+1 of bin_content.")
return
else:
self._bin_center = bin_center
self._bin_content = bin_content
self._bin_edge = bin_edge
self.update_all()
# ----------------------
# Histogram modification
# ----------------------
def conditional_binning(self, content_limit=5):
""" Scan the histogram and merge bins in order to have all bins > content_limit.
The scanning is done from the first to the last bin
!This may result in a histogram with a non constant bin width!"""
# for each bin in the hist
temp_bin_content = []
temp_bin_center = []
temp_bin_edge = []
# bin edge is N+1 longer than the rest and the left bin is always preserved
temp_bin_edge.append(self._bin_edge[0])
# loop through all bins
max_index = len(self._bin_content) - 1
index = 0
last = False
while index < len(self._bin_content):
# while the bin content is below content_limit, combine bins
temp_bin = self._bin_content[index]
while temp_bin < content_limit:
index += 1
# if we are not at the last bins
if index <= max_index:
temp_bin += self._bin_content[index]
# if we are at the last bin and it's too small
else:
last = True
# we need to remove one to the index to avoid conflict with bin_edge
index -= 1
break
#
if not last:
temp_bin_content.append(temp_bin)
temp_bin_edge.append(self._bin_edge[index + 1])
# re_calc the bin center
temp_bin_center.append(0.5 * (temp_bin_edge[-1] + temp_bin_edge[-2]))
else:
temp_bin_content[-1] += temp_bin
temp_bin_edge[-1] = self._bin_edge[index + 1]
temp_bin_center[-1] = (0.5 * (temp_bin_edge[-1] + temp_bin_edge[-2]))
index += 1
# update the hist
self._bin_content = np.array(temp_bin_content)
self._bin_center = np.array(temp_bin_center)
self._bin_edge = np.array(temp_bin_edge)
# update_hists
self.update_all()
def reframe(self, limits):
# new hist
temp_bin_content = []
temp_bin_center = []
temp_bin_edge = []
# for each existing bin
for i in range(len(self._bin_center)):
# if within the limits, we keep it
if self._bin_edge[i] > limits[0] and self._bin_edge[i+1] < limits[1]:
temp_bin_center.append(self._bin_center[i])
temp_bin_content.append(self._bin_content[i])
temp_bin_edge.append(self._bin_edge[i])
# keep the last in edge
temp_bin_edge.append(self._bin_edge[i + 1])
# overwrite the current hist
self._bin_center = np.array(temp_bin_center)
self._bin_content = np.array(temp_bin_content)
self._bin_edge = np.array(temp_bin_edge)
# update
self.update_all()
def area_normalize(self):
self._bin_content = self._bin_content / self.integral
self.update_all()
def filter_by_expected(self, exp, lim):
""" set to 0 every bin where the corresponding item in the 'exp' is lower than lim
"""
# for each position in exp
for i in range(len(exp)):
# if exp is above or equal to lim
if exp[i] < lim:
self._bin_content[i] = 0
exp[i] = 0
# update
self.update_all()
return exp
# ----------------------
# Update attributes
# ----------------------
def update_all(self):
self.min_bin_value = min(self._bin_content)
self.integral = np.sum(self._bin_content)
if self.integral > 2:
self.mean = np.average(self._bin_center, weights=self._bin_content)
self.stdev = sqrt(np.average((self._bin_center - self.mean) ** 2, weights=self._bin_content))
self.stdev_err = self.stdev * (1 / np.sqrt(2 * self.integral - 2))
self.mean_err = self.stdev / np.sqrt(np.sum(self._bin_content))
else:
self.logger.warning("Cannot update all, < 2 samples in histogram")
self.mean_err = 0
self.stdev_err = 0
self.stdev = 0 | PypiClean |
/MeaxisNetwork-0.1.5.tar.gz/MeaxisNetwork-0.1.5/meaxisnetwork/creations.py | import requests
import json
from .utils import check_response, HTTPRequest
class Creations():
def __init__(self, **kwargs):
self.logcook = None
self.sesscook = None
for name, value in kwargs.items():
if name == "logcook":
self.logcook = value
if name == "sesscook":
self.sesscook = value
from .credentials import Credentials
try:
if Credentials.credentials != None and Credentials.credentials["logcook"] is not None:
if self.logcook != None and self.sesscook != None:
return
Credentials = Credentials.credentials
self.logcook = Credentials["logcook"]
self.sesscook = Credentials["sesscook"]
except:
pass
CreationRequest = HTTPRequest('creations', 'get', cookies = {"logcook": self.logcook, "sesscook": self.sesscook})
self.CreationList = CreationRequest.json()
def get_by_id(self, **kwargs):
for name, value in kwargs.items():
if name == "logcook":
self.logcook = value
if name == "sesscook":
self.sesscook = value
if name == "id" or "creationid":
self.creationid = value
CreationRequest = HTTPRequest(f"creations/{self.creationid}", 'get', cookies = {"logcook": self.logcook, "sesscook": self.sesscook})
return CreationRequest.json()
def create(self, **kwargs):
CreationName = None
description = None
for name, value in kwargs.items():
if name == "name":
CreationName = value
if name == "description" or name == "desc":
description = value
PostRequest = HTTPRequest('creations/new', 'post', cookies = {"logcook": self.logcook, "sesscook": self.sesscook}, data = {"name": CreationName, "description": description})
return PostRequest.json()
def update(self, **kwargs):
UpdateDict = {}
CreationID = None
for name, value in kwargs.items():
vars(self)[key] = value
cookies = {"logcook": self.logcook, "sesscook": self.sesscook}
PatchRequest = HTTPRequest(f'creations/{CreationID}/update', 'patch', cookies=cookies, data=UpdateDict)
GetRequest = HTTPRequest(f"creations/{CreationID}", 'get', cookies=cookies)
for name, value in GetRequest.json().items():
vars(self)[name] = value | PypiClean |
/Firefly_III_API_Client-2.0.5.0-py3-none-any.whl/firefly_iii_client/paths/v1_piggy_banks_id_attachments/get.py | from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from firefly_iii_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from firefly_iii_client import schemas # noqa: F401
from firefly_iii_client.model.unauthenticated import Unauthenticated
from firefly_iii_client.model.bad_request import BadRequest
from firefly_iii_client.model.attachment_array import AttachmentArray
from firefly_iii_client.model.internal_exception import InternalException
from firefly_iii_client.model.not_found import NotFound
from . import path
# Query params
PageSchema = schemas.IntSchema
RequestRequiredQueryParams = typing_extensions.TypedDict(
'RequestRequiredQueryParams',
{
}
)
RequestOptionalQueryParams = typing_extensions.TypedDict(
'RequestOptionalQueryParams',
{
'page': typing.Union[PageSchema, decimal.Decimal, int, ],
},
total=False
)
class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
pass
request_query_page = api_client.QueryParameter(
name="page",
style=api_client.ParameterStyle.FORM,
schema=PageSchema,
explode=True,
)
# Header params
XTraceIdSchema = schemas.UUIDSchema
RequestRequiredHeaderParams = typing_extensions.TypedDict(
'RequestRequiredHeaderParams',
{
}
)
RequestOptionalHeaderParams = typing_extensions.TypedDict(
'RequestOptionalHeaderParams',
{
'X-Trace-Id': typing.Union[XTraceIdSchema, str, uuid.UUID, ],
},
total=False
)
class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams):
pass
request_header_x_trace_id = api_client.HeaderParameter(
name="X-Trace-Id",
style=api_client.ParameterStyle.SIMPLE,
schema=XTraceIdSchema,
)
# Path params
IdSchema = schemas.StrSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
'id': typing.Union[IdSchema, str, ],
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_id = api_client.PathParameter(
name="id",
style=api_client.ParameterStyle.SIMPLE,
schema=IdSchema,
required=True,
)
_auth = [
'firefly_iii_auth',
]
SchemaFor200ResponseBodyApplicationVndApijson = AttachmentArray
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationVndApijson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/vnd.api+json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationVndApijson),
},
)
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor400ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
content={
'application/json': api_client.MediaType(
schema=SchemaFor400ResponseBodyApplicationJson),
},
)
SchemaFor401ResponseBodyApplicationJson = Unauthenticated
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor401ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
response_cls=ApiResponseFor401,
content={
'application/json': api_client.MediaType(
schema=SchemaFor401ResponseBodyApplicationJson),
},
)
SchemaFor404ResponseBodyApplicationJson = NotFound
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor404ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_404 = api_client.OpenApiResponse(
response_cls=ApiResponseFor404,
content={
'application/json': api_client.MediaType(
schema=SchemaFor404ResponseBodyApplicationJson),
},
)
SchemaFor500ResponseBodyApplicationJson = InternalException
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor500ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
response_cls=ApiResponseFor500,
content={
'application/json': api_client.MediaType(
schema=SchemaFor500ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
'400': _response_for_400,
'401': _response_for_401,
'404': _response_for_404,
'500': _response_for_500,
}
_all_accept_content_types = (
'application/vnd.api+json',
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _list_attachment_by_piggy_bank_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _list_attachment_by_piggy_bank_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _list_attachment_by_piggy_bank_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _list_attachment_by_piggy_bank_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Lists all attachments.
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params)
self._verify_typed_dict_inputs_oapg(RequestHeaderParams, header_params)
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_id,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
prefix_separator_iterator = None
for parameter in (
request_query_page,
):
parameter_data = query_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
if prefix_separator_iterator is None:
prefix_separator_iterator = parameter.get_prefix_separator_iterator()
serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
for serialized_value in serialized_data.values():
used_path += serialized_value
_headers = HTTPHeaderDict()
for parameter in (
request_header_x_trace_id,
):
parameter_data = header_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_headers.extend(serialized_data)
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(
status=response.status,
reason=response.reason,
api_response=api_response
)
return api_response
class ListAttachmentByPiggyBank(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def list_attachment_by_piggy_bank(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def list_attachment_by_piggy_bank(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def list_attachment_by_piggy_bank(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def list_attachment_by_piggy_bank(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._list_attachment_by_piggy_bank_oapg(
query_params=query_params,
header_params=header_params,
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._list_attachment_by_piggy_bank_oapg(
query_params=query_params,
header_params=header_params,
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
) | PypiClean |
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/resources/oira/script/chunks/4994.32ad41031b90628a0e18.min.js | "use strict";(self.webpackChunk_patternslib_patternslib=self.webpackChunk_patternslib_patternslib||[]).push([[4994],{83565:function(n,e,t){var o=t(87537),s=t.n(o),r=t(23645),l=t.n(r)()(s());l.push([n.id,"/*!\n * StackOverflow.com light style\n *\n * @stackoverflow/stacks v0.56.0\n * https://github.com/StackExchange/Stacks\n */.hljs{display:block;overflow-x:auto;padding:.5em;color:#2f3337;background:#f6f6f6}.hljs-comment{color:#656e77}.hljs-keyword,.hljs-selector-tag,.hljs-meta-keyword,.hljs-doctag,.hljs-section,.hljs-selector-class,.hljs-meta,.hljs-selector-pseudo,.hljs-attr{color:#015692}.hljs-attribute{color:#803378}.hljs-name,.hljs-type,.hljs-number,.hljs-selector-id,.hljs-quote,.hljs-template-tag,.hljs-built_in,.hljs-title,.hljs-literal{color:#b75501}.hljs-string,.hljs-regexp,.hljs-symbol,.hljs-variable,.hljs-template-variable,.hljs-link,.hljs-selector-attr,.hljs-meta-string{color:#54790d}.hljs-bullet,.hljs-code{color:#535a60}.hljs-deletion{color:#c02d2e}.hljs-addition{color:#2f6f44}.hljs-emphasis{font-style:italic}.hljs-strong{font-weight:bold}","",{version:3,sources:["webpack://./node_modules/highlight.js/styles/stackoverflow-light.css"],names:[],mappings:"AAAA;;;;;EAAA,CAOA,MACE,aAAA,CACA,eAAA,CACA,YAAA,CACA,aAAA,CACA,kBAAA,CAGF,cACE,aAAA,CAGF,gJASE,aAAA,CAGF,gBACE,aAAA,CAGF,6HASE,aAAA,CAGF,+HAQE,aAAA,CAGF,wBAEE,aAAA,CAGF,eACE,aAAA,CAGF,eACE,aAAA,CAGF,eACE,iBAAA,CAGF,aACE,gBAAA",sourcesContent:["/*!\n * StackOverflow.com light style\n *\n * @stackoverflow/stacks v0.56.0\n * https://github.com/StackExchange/Stacks\n */\n\n.hljs {\n display: block;\n overflow-x: auto;\n padding: 0.5em;\n color: #2f3337;\n background: #f6f6f6;\n}\n\n.hljs-comment {\n color: #656e77;\n}\n\n.hljs-keyword,\n.hljs-selector-tag,\n.hljs-meta-keyword,\n.hljs-doctag,\n.hljs-section,\n.hljs-selector-class,\n.hljs-meta,\n.hljs-selector-pseudo,\n.hljs-attr {\n color: 
#015692;\n}\n\n.hljs-attribute {\n color: #803378;\n}\n\n.hljs-name,\n.hljs-type,\n.hljs-number,\n.hljs-selector-id,\n.hljs-quote,\n.hljs-template-tag,\n.hljs-built_in,\n.hljs-title,\n.hljs-literal {\n color: #b75501;\n}\n\n.hljs-string,\n.hljs-regexp,\n.hljs-symbol,\n.hljs-variable,\n.hljs-template-variable,\n.hljs-link,\n.hljs-selector-attr,\n.hljs-meta-string {\n color: #54790d;\n}\n\n.hljs-bullet,\n.hljs-code {\n color: #535a60;\n}\n\n.hljs-deletion {\n color: #c02d2e;\n}\n\n.hljs-addition {\n color: #2f6f44;\n}\n\n.hljs-emphasis {\n font-style: italic;\n}\n\n.hljs-strong {\n font-weight: bold;\n}\n"],sourceRoot:""}]),e.Z=l},23645:function(n){n.exports=function(n){var e=[];return e.toString=function(){return this.map((function(e){var t="",o=void 0!==e[5];return e[4]&&(t+="@supports (".concat(e[4],") {")),e[2]&&(t+="@media ".concat(e[2]," {")),o&&(t+="@layer".concat(e[5].length>0?" ".concat(e[5]):""," {")),t+=n(e),o&&(t+="}"),e[2]&&(t+="}"),e[4]&&(t+="}"),t})).join("")},e.i=function(n,t,o,s,r){"string"==typeof n&&(n=[[null,n,void 0]]);var l={};if(o)for(var a=0;a<this.length;a++){var c=this[a][0];null!=c&&(l[c]=!0)}for(var i=0;i<n.length;i++){var u=[].concat(n[i]);o&&l[u[0]]||(void 0!==r&&(void 0===u[5]||(u[1]="@layer".concat(u[5].length>0?" 
".concat(u[5]):""," {").concat(u[1],"}")),u[5]=r),t&&(u[2]?(u[1]="@media ".concat(u[2]," {").concat(u[1],"}"),u[2]=t):u[2]=t),s&&(u[4]?(u[1]="@supports (".concat(u[4],") {").concat(u[1],"}"),u[4]=s):u[4]="".concat(s)),e.push(u))}},e}},87537:function(n){n.exports=function(n){var e=n[1],t=n[3];if(!t)return e;if("function"==typeof btoa){var o=btoa(unescape(encodeURIComponent(JSON.stringify(t)))),s="sourceMappingURL=data:application/json;charset=utf-8;base64,".concat(o),r="/*# ".concat(s," */");return[e].concat([r]).join("\n")}return[e].join("\n")}},4994:function(n,e,t){t.r(e);var o=t(93379),s=t.n(o),r=t(7795),l=t.n(r),a=t(3565),c=t.n(a),i=t(19216),u=t.n(i),h=t(44589),f=t.n(h),p=t(83565),d={};d.styleTagTransform=f(),d.setAttributes=c(),d.insert=function(n){var e=document.head.querySelectorAll("*")[0];e?document.head.insertBefore(n,e):document.head.append(n)},d.domAPI=l(),d.insertStyleElement=u();s()(p.Z,d);e.default=p.Z&&p.Z.locals?p.Z.locals:void 0},93379:function(n){var e=[];function t(n){for(var t=-1,o=0;o<e.length;o++)if(e[o].identifier===n){t=o;break}return t}function o(n,o){for(var r={},l=[],a=0;a<n.length;a++){var c=n[a],i=o.base?c[0]+o.base:c[0],u=r[i]||0,h="".concat(i," ").concat(u);r[i]=u+1;var f=t(h),p={css:c[1],media:c[2],sourceMap:c[3],supports:c[4],layer:c[5]};if(-1!==f)e[f].references++,e[f].updater(p);else{var d=s(p,o);o.byIndex=a,e.splice(a,0,{identifier:h,updater:d,references:1})}l.push(h)}return l}function s(n,e){var t=e.domAPI(e);t.update(n);return function(e){if(e){if(e.css===n.css&&e.media===n.media&&e.sourceMap===n.sourceMap&&e.supports===n.supports&&e.layer===n.layer)return;t.update(n=e)}else t.remove()}}n.exports=function(n,s){var r=o(n=n||[],s=s||{});return function(n){n=n||[];for(var l=0;l<r.length;l++){var a=t(r[l]);e[a].references--}for(var c=o(n,s),i=0;i<r.length;i++){var u=t(r[i]);0===e[u].references&&(e[u].updater(),e.splice(u,1))}r=c}}},19216:function(n){n.exports=function(n){var e=document.createElement("style");return 
n.setAttributes(e,n.attributes),n.insert(e,n.options),e}},3565:function(n,e,t){n.exports=function(n){var e=t.nc;e&&n.setAttribute("nonce",e)}},7795:function(n){n.exports=function(n){if("undefined"==typeof document)return{update:function(){},remove:function(){}};var e=n.insertStyleElement(n);return{update:function(t){!function(n,e,t){var o="";t.supports&&(o+="@supports (".concat(t.supports,") {")),t.media&&(o+="@media ".concat(t.media," {"));var s=void 0!==t.layer;s&&(o+="@layer".concat(t.layer.length>0?" ".concat(t.layer):""," {")),o+=t.css,s&&(o+="}"),t.media&&(o+="}"),t.supports&&(o+="}");var r=t.sourceMap;r&&"undefined"!=typeof btoa&&(o+="\n/*# sourceMappingURL=data:application/json;base64,".concat(btoa(unescape(encodeURIComponent(JSON.stringify(r))))," */")),e.styleTagTransform(o,n,e.options)}(e,n,t)},remove:function(){!function(n){if(null===n.parentNode)return!1;n.parentNode.removeChild(n)}(e)}}}},44589:function(n){n.exports=function(n,e){if(e.styleSheet)e.styleSheet.cssText=n;else{for(;e.firstChild;)e.removeChild(e.firstChild);e.appendChild(document.createTextNode(n))}}}}]);
//# sourceMappingURL=4994.32ad41031b90628a0e18.min.js.map | PypiClean |
/564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/extensions/directives.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Tuple
from strawberry.extensions import SchemaExtension
from strawberry.types import Info
from strawberry.types.nodes import convert_arguments
from strawberry.utils.await_maybe import await_maybe
if TYPE_CHECKING:
from graphql import DirectiveNode, GraphQLResolveInfo
from strawberry.directive import StrawberryDirective
from strawberry.field import StrawberryField
from strawberry.schema.schema import Schema
from strawberry.utils.await_maybe import AwaitableOrValue
# Directives implemented by the GraphQL executor itself; the extensions
# below skip these and only process user-defined directives.
SPECIFIED_DIRECTIVES = {"include", "skip"}
class DirectivesExtension(SchemaExtension):
    """Applies custom (non-spec) schema directives to resolved values, async."""

    async def resolve(
        self, _next, root, info: GraphQLResolveInfo, *args, **kwargs
    ) -> AwaitableOrValue[Any]:
        # Resolve the field first, then thread the result through every
        # user-defined directive attached to the field node.
        result = await await_maybe(_next(root, info, *args, **kwargs))

        for node in info.field_nodes[0].directives:
            # `include`/`skip` are handled by the GraphQL executor itself.
            if node.name.value in SPECIFIED_DIRECTIVES:
                continue
            directive, resolver_kwargs = process_directive(node, result, info)
            result = await await_maybe(directive.resolver(**resolver_kwargs))

        return result
class DirectivesExtensionSync(SchemaExtension):
    """Applies custom (non-spec) schema directives to resolved values, sync."""

    def resolve(
        self, _next, root, info: GraphQLResolveInfo, *args, **kwargs
    ) -> AwaitableOrValue[Any]:
        # Resolve the field first, then thread the result through every
        # user-defined directive attached to the field node.
        result = _next(root, info, *args, **kwargs)

        for node in info.field_nodes[0].directives:
            # `include`/`skip` are handled by the GraphQL executor itself.
            if node.name.value in SPECIFIED_DIRECTIVES:
                continue
            directive, resolver_kwargs = process_directive(node, result, info)
            result = directive.resolver(**resolver_kwargs)

        return result
def process_directive(
    directive: DirectiveNode,
    value: Any,
    info: GraphQLResolveInfo,
) -> Tuple[StrawberryDirective, Dict[str, Any]]:
    """Find the ``StrawberryDirective`` matching ``directive`` and build the
    keyword arguments for its resolver.

    The resolver's optional ``info`` and ``value`` parameters are filled in
    when declared.
    """
    directive_name = directive.name.value

    schema: Schema = info.schema._strawberry_schema  # type: ignore
    strawberry_directive = schema.get_directive_by_name(directive_name)
    assert strawberry_directive is not None, f"Directive {directive_name} not found"

    arguments = convert_arguments(info=info, nodes=directive.arguments)
    resolver = strawberry_directive.resolver

    if resolver.info_parameter:
        # The resolver wants the Strawberry Info object; build it from the raw
        # GraphQL info plus the field definition found on the parent type.
        field: StrawberryField = schema.get_field_for_type(  # type: ignore
            field_name=info.field_name,
            type_name=info.parent_type.name,
        )
        arguments[resolver.info_parameter.name] = Info(_raw_info=info, _field=field)
    if resolver.value_parameter:
        arguments[resolver.value_parameter.name] = value

    return strawberry_directive, arguments
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/_source/plugins/a11yhelp/dialogs/a11yhelp.js | /*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.dialog.add( 'a11yHelp', function( editor )
{
	var lang = editor.lang.accessibilityHelp,
		id = CKEDITOR.tools.getNextNumber();

	// CharCode <-> KeyChar. Maps DOM key codes to the readable key names
	// shown in the accessibility help text.
	var keyMap =
	{
		8 : "BACKSPACE",
		9 : "TAB" ,
		13 : "ENTER" ,
		16 : "SHIFT" ,
		17 : "CTRL" ,
		18 : "ALT" ,
		19 : "PAUSE" ,
		20 : "CAPSLOCK" ,
		27 : "ESCAPE" ,
		33 : "PAGE UP" ,
		34 : "PAGE DOWN" ,
		35 : "END" ,
		36 : "HOME" ,
		37 : "LEFT ARROW" ,
		38 : "UP ARROW" ,
		39 : "RIGHT ARROW" ,
		40 : "DOWN ARROW" ,
		45 : "INSERT" ,
		46 : "DELETE" ,
		91 : "LEFT WINDOW KEY" ,
		92 : "RIGHT WINDOW KEY" ,
		93 : "SELECT KEY" ,
		96 : "NUMPAD 0" ,
		97 : "NUMPAD 1" ,
		98 : "NUMPAD 2" ,
		99 : "NUMPAD 3" ,
		100 : "NUMPAD 4" ,
		101 : "NUMPAD 5" ,
		102 : "NUMPAD 6" ,
		103 : "NUMPAD 7" ,
		104 : "NUMPAD 8" ,
		105 : "NUMPAD 9" ,
		106 : "MULTIPLY" ,
		107 : "ADD" ,
		109 : "SUBTRACT" ,
		110 : "DECIMAL POINT" ,
		111 : "DIVIDE" ,
		112 : "F1" ,
		113 : "F2" ,
		114 : "F3" ,
		115 : "F4" ,
		116 : "F5" ,
		117 : "F6" ,
		118 : "F7" ,
		119 : "F8" ,
		120 : "F9" ,
		121 : "F10" ,
		122 : "F11" ,
		123 : "F12" ,
		144 : "NUM LOCK" ,
		145 : "SCROLL LOCK" ,
		186 : "SEMI-COLON" ,
		187 : "EQUAL SIGN" ,
		188 : "COMMA" ,
		189 : "DASH" ,
		190 : "PERIOD" ,
		191 : "FORWARD SLASH" ,
		192 : "GRAVE ACCENT" ,
		219 : "OPEN BRACKET" ,
		220 : "BACK SLASH" ,
		// Bug fix: label previously read "CLOSE BRAKET".
		221 : "CLOSE BRACKET" ,
		222 : "SINGLE QUOTE"
	};

	// Modifier keys override.
	keyMap[ CKEDITOR.ALT ] = 'ALT';
	keyMap[ CKEDITOR.SHIFT ] = 'SHIFT';
	keyMap[ CKEDITOR.CTRL ] = 'CTRL';

	// Sort in desc.
	var modifiers = [ CKEDITOR.ALT, CKEDITOR.SHIFT, CKEDITOR.CTRL ];

	// Render a combined keystroke value (modifier flags plus a key code)
	// as a readable string such as "CTRL+SHIFT+S".
	function representKeyStroke( keystroke )
	{
		var quotient,
			modifier,
			presentation = [];

		for ( var i = 0; i < modifiers.length; i++ )
		{
			modifier = modifiers[ i ];
			quotient = keystroke / modifiers[ i ];
			// The modifier flag is set exactly when the ratio falls in (1, 2].
			if ( quotient > 1 && quotient <= 2 )
			{
				keystroke -= modifier;
				presentation.push( keyMap[ modifier ] );
			}
		}

		presentation.push( keyMap[ keystroke ]
				|| String.fromCharCode( keystroke ) );

		return presentation.join( '+' );
	}

	var variablesPattern = /\$\{(.*?)\}/g;

	// Replace a "${commandName}" placeholder with the keystroke assigned
	// to that command in editor.config.keystrokes.
	function replaceVariables( match, name )
	{
		var keystrokes = editor.config.keystrokes,
			definition = null,
			length = keystrokes.length;

		for ( var i = 0; i < length; i++ )
		{
			if ( keystrokes[ i ][ 1 ] == name )
			{
				definition = keystrokes[ i ];
				break;
			}
		}

		// Bug fix: when no keystroke is assigned to the named command, keep
		// the placeholder text. Previously the loop fell through with the
		// last (unrelated) entry, showing a wrong keystroke, and threw on an
		// empty keystroke list.
		if ( !definition )
			return match;

		return representKeyStroke( definition[ 0 ] );
	}

	// Create the help list directly from lang file entries.
	function buildHelpContents()
	{
		var pageTpl = '<div class="cke_accessibility_legend" role="document" aria-labelledby="cke_' + id + '_arialbl" tabIndex="-1">%1</div>' +
			'<span id="cke_' + id + '_arialbl" class="cke_voice_label">' + lang.contents + ' </span>',
			sectionTpl = '<h1>%1</h1><dl>%2</dl>',
			itemTpl = '<dt>%1</dt><dd>%2</dd>';

		var pageHtml = [],
			sections = lang.legend,
			sectionLength = sections.length;

		for ( var i = 0; i < sectionLength; i++ )
		{
			var section = sections[ i ],
				sectionHtml = [],
				items = section.items,
				itemsLength = items.length;

			for ( var j = 0; j < itemsLength; j++ )
			{
				var item = items[ j ],
					itemHtml;

				// Expand "${command}" placeholders inside each legend entry.
				itemHtml = itemTpl.replace( '%1', item.name ).
					replace( '%2', item.legend.replace( variablesPattern, replaceVariables ) );

				sectionHtml.push( itemHtml );
			}

			pageHtml.push( sectionTpl.replace( '%1', section.name ).replace( '%2', sectionHtml.join( '' ) ) );
		}

		return pageTpl.replace( '%1', pageHtml.join( '' ) );
	}

	return {
		title : lang.title,
		minWidth : 600,
		minHeight : 400,
		contents : [
			{
				id : 'info',
				label : editor.lang.common.generalTab,
				expand : true,
				elements :
				[
					{
						type : 'html',
						id : 'legends',
						focus : function() {},
						html : buildHelpContents() +
							'<style type="text/css">' +
								'.cke_accessibility_legend' +
								'{' +
									'width:600px;' +
									'height:400px;' +
									'padding-right:5px;' +
									'overflow-y:auto;' +
									'overflow-x:hidden;' +
								'}' +
								'.cke_accessibility_legend h1' +
								'{' +
									'font-size: 20px;' +
									'border-bottom: 1px solid #AAA;' +
									'margin: 5px 0px 15px;' +
								'}' +
								'.cke_accessibility_legend dl' +
								'{' +
									'margin-left: 5px;' +
								'}' +
								'.cke_accessibility_legend dt' +
								'{' +
									'font-size: 13px;' +
									'font-weight: bold;' +
								'}' +
								'.cke_accessibility_legend dd' +
								'{' +
									'white-space:normal;' +
									'margin:10px' +
								'}' +
							'</style>'
					}
				]
			}
		],
		buttons : [ CKEDITOR.dialog.cancelButton ]
	};
});
/FALCONN-1.3.1.tar.gz/FALCONN-1.3.1/external/pybind11/docs/benchmark.rst | Benchmark
=========
The following is the result of a synthetic benchmark comparing both compilation
time and module size of pybind11 against Boost.Python. A detailed report about a
Boost.Python to pybind11 conversion of a real project is available here: [#f1]_.
.. [#f1] http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf
Setup
-----
A python script (see the ``docs/benchmark.py`` file) was used to generate a set
of files with dummy classes whose count increases for each successive benchmark
(between 1 and 2048 classes in powers of two). Each class has four methods with
a randomly generated signature with a return value and four arguments. (There
was no particular reason for this setup other than the desire to generate many
unique function signatures whose count could be controlled in a simple way.)
Here is an example of the binding code for one class:
.. code-block:: cpp
...
class cl034 {
public:
cl279 *fn_000(cl084 *, cl057 *, cl065 *, cl042 *);
cl025 *fn_001(cl098 *, cl262 *, cl414 *, cl121 *);
cl085 *fn_002(cl445 *, cl297 *, cl145 *, cl421 *);
cl470 *fn_003(cl200 *, cl323 *, cl332 *, cl492 *);
};
...
PYBIND11_MODULE(example, m) {
...
py::class_<cl034>(m, "cl034")
.def("fn_000", &cl034::fn_000)
.def("fn_001", &cl034::fn_001)
.def("fn_002", &cl034::fn_002)
.def("fn_003", &cl034::fn_003)
...
}
The Boost.Python version looks almost identical except that a return value
policy had to be specified as an argument to ``def()``. For both libraries,
compilation was done with
.. code-block:: bash
Apple LLVM version 7.0.2 (clang-700.1.81)
and the following compilation flags
.. code-block:: bash
g++ -Os -shared -rdynamic -undefined dynamic_lookup -fvisibility=hidden -std=c++14
Compilation time
----------------
The following log-log plot shows how the compilation time grows for an
increasing number of class and function declarations. pybind11 includes many
fewer headers, which initially leads to shorter compilation times, but the
performance is ultimately fairly similar (pybind11 is 19.8 seconds faster for
the largest file with 2048 classes and a total of 8192 methods -- a
modest **1.2x** speedup relative to Boost.Python, which required 116.35
seconds).
.. only:: not latex
.. image:: pybind11_vs_boost_python1.svg
.. only:: latex
.. image:: pybind11_vs_boost_python1.png
Module size
-----------
Differences between the two libraries become much more pronounced when
considering the file size of the generated Python plugin: for the largest file,
the binary generated by Boost.Python required 16.8 MiB, which was **2.17
times** / **9.1 megabytes** larger than the output generated by pybind11. For
very small inputs, Boost.Python has an edge in the plot below -- however, note
that it stores many definitions in an external library, whose size was not
included here, hence the comparison is slightly shifted in Boost.Python's
favor.
.. only:: not latex
.. image:: pybind11_vs_boost_python2.svg
.. only:: latex
.. image:: pybind11_vs_boost_python2.png
| PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/layout/RadioGroup.js | define("dojox/layout/RadioGroup",["dojo/_base/kernel","dojo/_base/declare","dojo/_base/html","dojo/_base/lang","dojo/_base/query","dijit/_Widget","dijit/_Templated","dijit/_Contained","dijit/layout/StackContainer","dojo/fx/easing","dojo/_base/fx","dojo/dom-construct","dojo/dom-class"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,_a,_b,_c,_d){
_1.experimental("dojox.layout.RadioGroup");
var _e=_2("dojox.layout.RadioGroup",[_9,_7],{duration:750,hasButtons:false,buttonClass:"dojox.layout._RadioButton",templateString:"<div class=\"dojoxRadioGroup\">"+" \t<div dojoAttachPoint=\"buttonHolder\" style=\"display:none;\">"+"\t\t<table class=\"dojoxRadioButtons\"><tbody><tr class=\"dojoxRadioButtonRow\" dojoAttachPoint=\"buttonNode\"></tr></tbody></table>"+"\t</div>"+"\t<div class=\"dojoxRadioView\" dojoAttachPoint=\"containerNode\"></div>"+"</div>",startup:function(){
this.inherited(arguments);
this._children=this.getChildren();
this._buttons=this._children.length;
this._size=_3.coords(this.containerNode);
if(this.hasButtons){
_3.style(this.buttonHolder,"display","block");
}
},_setupChild:function(_f){
_3.style(_f.domNode,"position","absolute");
if(this.hasButtons){
var tmp=this.buttonNode.appendChild(_c.create("td"));
var n=_c.create("div",null,tmp),_10=_4.getObject(this.buttonClass),_11=new _10({label:_f.title,page:_f},n);
_4.mixin(_f,{_radioButton:_11});
_11.startup();
}
_f.domNode.style.display="none";
},removeChild:function(_12){
if(this.hasButtons&&_12._radioButton){
_12._radioButton.destroy();
delete _12._radioButton;
}
this.inherited(arguments);
},_transition:function(_13,_14){
this._showChild(_13);
if(_14){
this._hideChild(_14);
}
if(this.doLayout&&_13.resize){
_13.resize(this._containerContentBox||this._contentBox);
}
},_showChild:function(_15){
var _16=this.getChildren();
_15.isFirstChild=(_15==_16[0]);
_15.isLastChild=(_15==_16[_16.length-1]);
_15.selected=true;
_15.domNode.style.display="";
if(_15._onShow){
_15._onShow();
}else{
if(_15.onShow){
_15.onShow();
}
}
},_hideChild:function(_17){
_17.selected=false;
_17.domNode.style.display="none";
if(_17.onHide){
_17.onHide();
}
}});
_2("dojox.layout.RadioGroupFade",_e,{_hideChild:function(_18){
_b.fadeOut({node:_18.domNode,duration:this.duration,onEnd:_4.hitch(this,"inherited",arguments,arguments)}).play();
},_showChild:function(_19){
this.inherited(arguments);
_3.style(_19.domNode,"opacity",0);
_b.fadeIn({node:_19.domNode,duration:this.duration}).play();
}});
_2("dojox.layout.RadioGroupSlide",_e,{easing:"dojo.fx.easing.backOut",zTop:99,constructor:function(){
if(_4.isString(this.easing)){
this.easing=_4.getObject(this.easing);
}
},_positionChild:function(_1a){
if(!this._size){
return;
}
var rA=true,rB=true;
switch(_1a.slideFrom){
case "bottom":
rB=!rB;
break;
case "right":
rA=!rA;
rB=!rB;
break;
case "top":
break;
case "left":
rA=!rA;
break;
default:
rA=Math.round(Math.random());
rB=Math.round(Math.random());
break;
}
var _1b=rA?"top":"left",val=(rB?"-":"")+(this._size[rA?"h":"w"]+20)+"px";
_3.style(_1a.domNode,_1b,val);
},_showChild:function(_1c){
var _1d=this.getChildren();
_1c.isFirstChild=(_1c==_1d[0]);
_1c.isLastChild=(_1c==_1d[_1d.length-1]);
_1c.selected=true;
_3.style(_1c.domNode,{zIndex:this.zTop,display:""});
if(this._anim&&this._anim.status()=="playing"){
this._anim.gotoPercent(100,true);
}
this._anim=_b.animateProperty({node:_1c.domNode,properties:{left:0,top:0},duration:this.duration,easing:this.easing,onEnd:_4.hitch(_1c,function(){
if(this.onShow){
this.onShow();
}
if(this._onShow){
this._onShow();
}
}),beforeBegin:_4.hitch(this,"_positionChild",_1c)});
this._anim.play();
},_hideChild:function(_1e){
_1e.selected=false;
_1e.domNode.style.zIndex=this.zTop-1;
if(_1e.onHide){
_1e.onHide();
}
}});
_2("dojox.layout._RadioButton",[_6,_7,_8],{label:"",page:null,templateString:"<div dojoAttachPoint=\"focusNode\" class=\"dojoxRadioButton\"><span dojoAttachPoint=\"titleNode\" class=\"dojoxRadioButtonLabel\">${label}</span></div>",startup:function(){
this.connect(this.domNode,"onmouseenter","_onMouse");
},_onMouse:function(e){
this.getParent().selectChild(this.page);
this._clearSelected();
_d.add(this.domNode,"dojoxRadioButtonSelected");
},_clearSelected:function(){
_5(".dojoxRadioButtonSelected",this.domNode.parentNode.parentNode).removeClass("dojoxRadioButtonSelected");
}});
_4.extend(_6,{slideFrom:"random"});
}); | PypiClean |
/Dero-0.15.0-py3-none-any.whl/dero/manager/io/file/load/parsers/assign.py | import ast
from typing import Union, TYPE_CHECKING
if TYPE_CHECKING:
from dero.manager.assignments.models.statement import AssignmentStatement
from dero.manager.assignments.models.container import AssignmentStatementContainer
# Convenience alias: an extracted assignment statement, or None when absent.
AssignmentStatementOrNone = Union['AssignmentStatement', None]
class AssignmentExtractor(ast.NodeVisitor):
    """AST visitor that collects every assignment statement it encounters."""

    def __init__(self):
        # Imported lazily to avoid a circular import at module load time.
        from dero.manager.assignments.models.container import AssignmentStatementContainer
        self.assigns = AssignmentStatementContainer([])

    def _record(self, node):
        # Wrap the raw AST node and append it to the container.
        from dero.manager.assignments.models.statement import AssignmentStatement
        self.assigns.append(AssignmentStatement.from_ast_assign(node))

    def visit_Assign(self, node):
        self._record(node)

    def visit_AnnAssign(self, node):
        self._record(node)
def extract_assignments_from_ast(module: ast.Module) -> 'AssignmentStatementContainer':
    """Collect every assignment statement found in *module*."""
    visitor = AssignmentExtractor()
    visitor.visit(module)
    return visitor.assigns
def extract_assignment_from_ast(module: ast.Module) -> AssignmentStatementOrNone:
    """Return the single assignment in *module*, or ``None`` when there is none.

    Raises:
        ValueError: if the module contains more than one assignment.
    """
    container = extract_assignments_from_ast(module)
    if len(container) == 0:
        return None
    if len(container) > 1:
        raise ValueError(f'expected to extract one assignment from ast. got {len(container)} '
                         f'assigns: {container.items}')
    return container[0]
class AssignmentByVarnameExtractor(AssignmentExtractor):
    """AST visitor that collects only assignments to a single variable name."""

    def __init__(self, varname: str):
        self.varname = varname
        super().__init__()

    def visit_Assign(self, node):
        # Only plain-name targets can match. Tuple/attribute/subscript targets
        # (e.g. ``a, b = ...`` or ``obj.x = ...``) have no ``.id`` attribute and
        # previously raised AttributeError here.
        varnames = [
            target.id for target in node.targets if isinstance(target, ast.Name)
        ]
        if self.varname in varnames:
            super().visit_Assign(node)

    def visit_AnnAssign(self, node):
        # Annotated assignments may target attributes/subscripts too; guard
        # the same way as visit_Assign.
        if isinstance(node.target, ast.Name) and node.target.id == self.varname:
            super().visit_AnnAssign(node)
def extract_assignments_from_ast_by_name(module: ast.Module, name: str) -> 'AssignmentStatementContainer':
    """Collect every assignment to variable *name* found in *module*."""
    visitor = AssignmentByVarnameExtractor(name)
    visitor.visit(module)
    return visitor.assigns
def extract_assignment_from_ast_by_name(module: ast.Module, name: str) -> AssignmentStatementOrNone:
    """Return the single assignment to *name* in *module*, or ``None``.

    Raises:
        ValueError: if *module* assigns to *name* more than once.
    """
    container = extract_assignments_from_ast_by_name(module, name)
    if len(container) == 0:
        return None
    if len(container) > 1:
        raise ValueError(f'expected to extract one assignment from ast. got {len(container)} '
                         f'assigns: {container.items}')
    return container[0]
/Flootty-3.3.0-py3-none-any.whl/flootty/floo/common/utils.py | import os
import errno
import json
import re
import hashlib
import time
import webbrowser
from functools import wraps
try:
from urllib.parse import urlparse
assert urlparse
except ImportError:
from urlparse import urlparse
try:
from .. import editor
from . import shared as G
from .exc_fmt import str_e
from . import msg
from .lib import DMP
assert G and DMP
except ImportError:
import editor
import msg
from exc_fmt import str_e
import shared as G
from lib import DMP
class JOIN_ACTION(object):
    """Enumeration of actions taken when joining a workspace (prompt / upload / download)."""
    PROMPT = 1
    UPLOAD = 2
    DOWNLOAD = 3
DOWNLOAD = 3
class FlooPatch(object):
    """A diff between a buffer's last-known contents and its current contents.

    Records md5 checksums of both states so the receiving side can verify
    the patch applies to the text it expects.
    """

    def __init__(self, current, buf):
        # buf: shared buffer dict with 'buf', 'encoding', 'id', 'path' keys.
        # current: the editor's current text (bytes for base64 buffers).
        self.buf = buf
        self.current = current
        self.previous = buf['buf']
        if buf['encoding'] == 'base64':
            # Binary buffer: both states are bytes already; hash directly.
            self.md5_before = hashlib.md5(self.previous).hexdigest()
            self.md5_after = hashlib.md5(self.current).hexdigest()
        else:
            # Text buffer: hash the utf-8 encoding, with a best-effort
            # fallback for objects that don't support .encode (py2/3 compat).
            try:
                self.md5_before = hashlib.md5(self.previous.encode('utf-8')).hexdigest()
            except Exception as e:
                # Horrible fallback if for some reason encoding doesn't agree with actual object
                self.md5_before = hashlib.md5(self.previous).hexdigest()
                msg.log('Error calculating md5_before for ', str(self), ': ', str_e(e))
            try:
                self.md5_after = hashlib.md5(self.current.encode('utf-8')).hexdigest()
            except Exception as e:
                # Horrible fallback if for some reason encoding doesn't agree with actual object
                self.md5_after = hashlib.md5(self.current).hexdigest()
                msg.log('Error calculating md5_after for ', str(self), ': ', str_e(e))

    def __str__(self):
        return '%s - %s' % (self.buf['id'], self.buf['path'])

    def patches(self):
        # diff-match-patch patch list turning previous into current.
        return DMP.patch_make(self.previous, self.current)

    def to_json(self):
        """Serialize as a 'patch' event dict, or None when nothing changed."""
        patches = self.patches()
        if len(patches) == 0:
            return None
        patch_str = ''
        for patch in patches:
            patch_str += str(patch)
        return {
            'id': self.buf['id'],
            'md5_after': self.md5_after,
            'md5_before': self.md5_before,
            'path': self.buf['path'],
            'patch': patch_str,
            'name': 'patch'
        }
def reload_settings():
    """Re-read floorc.json and push its settings onto the global config G.

    Side effects (in order): sets attributes on G, validates G.AUTH in
    place, recomputes BASE_DIR/COLAB_DIR, adjusts the log level, and
    ensures the colab directory exists. Returns the loaded settings dict.
    """
    floorc_settings = load_floorc_json()
    for name, val in floorc_settings.items():
        setattr(G, name, val)
    validate_auth(G.AUTH)
    # SHARE_DIR, when set, overrides the base directory entirely.
    if G.SHARE_DIR:
        G.BASE_DIR = G.SHARE_DIR
    G.BASE_DIR = os.path.realpath(os.path.expanduser(G.BASE_DIR))
    G.COLAB_DIR = os.path.join(G.BASE_DIR, 'share')
    G.COLAB_DIR = os.path.realpath(G.COLAB_DIR)
    if G.DEBUG:
        msg.LOG_LEVEL = msg.LOG_LEVELS['DEBUG']
    else:
        msg.LOG_LEVEL = msg.LOG_LEVELS['MSG']
    mkdir(G.COLAB_DIR)
    return floorc_settings
def load_floorc_json():
    """Load user settings from the floorc JSON file.

    Returns the defaults overlaid with the file's settings (names are
    upper-cased). A missing or unparseable file yields just the defaults;
    any other read error propagates.
    """
    # Expose a few settings for curious users to tweak
    settings = {
        'expert_mode': False,
        'debug': False,
    }
    try:
        with open(G.FLOORC_JSON_PATH, 'r') as fd:
            raw = fd.read()
    except IOError as e:
        if e.errno == errno.ENOENT:
            return settings
        raise

    try:
        parsed = json.loads(raw)
    except ValueError:
        return settings

    for name, value in parsed.items():
        settings[name.upper()] = value
    return settings
def save_floorc_json(s):
    """Persist settings dict *s* to the floorc JSON file (keys lower-cased)."""
    lowered = dict((key.lower(), value) for key, value in s.items())
    msg.log('Writing ', lowered)
    with open(G.FLOORC_JSON_PATH, 'w') as fd:
        fd.write(json.dumps(lowered, indent=4, sort_keys=True, separators=(',', ': ')))
def validate_auth(auth):
    """Validate the ``auth`` section of floorc.json.

    Each entry must map a host name to a dict containing ``username``,
    ``api_key`` and ``secret``. Invalid entries are reported via msg.error
    and removed from *auth* in place.

    Returns True when every entry was valid.
    """
    if type(auth) != dict:
        msg.error('floorc.json validation error: Auth section is not an object!')
        return False
    to_delete = []
    for k, v in auth.items():
        if type(v) != dict:
            msg.error('floorc.json validation error: host "', k, '" has invalid auth credentials. Did you put a setting in the auth section?')
            to_delete.append(k)
            # Bug fix: this used to `break`, which aborted validation of
            # every remaining host after the first bad entry.
            continue
        for key in ['username', 'api_key', 'secret']:
            if not v.get(key):
                msg.error('floorc.json validation error: host "', k, '" missing "', key, '"')
                to_delete.append(k)
                break
    for k in to_delete:
        del auth[k]
    return len(to_delete) == 0
def can_auth(host=None):
    """Return True when stored credentials look usable for *host*.

    When *host* is None, fall back to the first configured host, or the
    default host when none is configured.
    """
    if host is None:
        host = len(G.AUTH) and list(G.AUTH.keys())[0] or G.DEFAULT_HOST
    creds = G.AUTH.get(host)
    if type(creds) != dict:
        return False
    has_identity = creds.get('username') or creds.get('api_key')
    return bool(has_identity and creds.get('secret'))
# Ids passed to cancel_timeout() before their callback fired.
cancelled_timeouts = set()
# Ids of timeouts that are currently scheduled.
timeout_ids = set()
def set_timeout(func, timeout, *args, **kwargs):
    """Run ``func(*args, **kwargs)`` once after *timeout* (editor timer
    units; presumably milliseconds — TODO confirm). Returns a cancellable id."""
    return _set_timeout(func, timeout, False, *args, **kwargs)


def set_interval(func, timeout, *args, **kwargs):
    """Run ``func(*args, **kwargs)`` repeatedly every *timeout*. Returns a
    cancellable id."""
    return _set_timeout(func, timeout, True, *args, **kwargs)
def _set_timeout(func, timeout, repeat, *args, **kwargs):
    """Schedule *func* via the editor's timer; returns a cancellable id.

    When *repeat* is true the callback reschedules itself after each run.
    """
    timeout_id = set_timeout._top_timeout_id
    # Ids wrap around at 100000 so they stay small; collisions with a
    # still-outstanding id are assumed not to happen in practice.
    if timeout_id > 100000:
        set_timeout._top_timeout_id = 0
    else:
        set_timeout._top_timeout_id += 1

    # Works both as a package-relative and a flat import (see module header).
    try:
        from . import api
    except ImportError:
        import api

    @api.send_errors
    def timeout_func():
        timeout_ids.discard(timeout_id)
        # A cancelled timeout becomes a no-op (see cancel_timeout).
        if timeout_id in cancelled_timeouts:
            cancelled_timeouts.remove(timeout_id)
            return
        func(*args, **kwargs)
        if repeat:
            editor.set_timeout(timeout_func, timeout)
            timeout_ids.add(timeout_id)
    editor.set_timeout(timeout_func, timeout)
    timeout_ids.add(timeout_id)
    return timeout_id
set_timeout._top_timeout_id = 0
def cancel_timeout(timeout_id):
    """Mark a pending timeout as cancelled so its callback becomes a no-op."""
    if timeout_id not in timeout_ids:
        return
    cancelled_timeouts.add(timeout_id)
# Maps rate-limit names to the timestamp of their last allowed call.
rate_limits = {}
def rate_limit(name, timeout, func, *args, **kwargs):
    """Call *func(*args, **kwargs)* at most once per *timeout* interval per *name*.

    Calls arriving while *name* is rate-limited are dropped (not queued).
    """
    if rate_limits.get(name):
        return
    rate_limits[name] = time.time()
    func(*args, **kwargs)

    def delete_limit():
        del rate_limits[name]

    # Bug fix: *args/**kwargs were previously forwarded here too, so the
    # timer would call delete_limit(*args, **kwargs) and raise TypeError
    # whenever the rate-limited function took arguments.
    set_timeout(delete_limit, timeout)
def parse_url(workspace_url):
    """Parse a workspace URL (or an "owner/workspace" shorthand) into parts.

    Returns a dict with ``host``, ``owner``, ``port``, ``workspace`` and
    ``secure`` keys.

    Raises:
        ValueError: when the URL does not look like a Floobits workspace URL.
    """
    secure = G.SECURE
    owner = None
    workspace_name = None
    # owner/workspacename shorthand expands against the default host.
    # Fix: regexes are now raw strings; the old plain strings relied on
    # invalid escape sequences like '\@', which newer Pythons warn about.
    result = re.match(r'^([-\@\+\.\w]+)/([-\.\w]+)$', workspace_url)
    if result:
        workspace_url = 'https://' + G.DEFAULT_HOST + '/' + workspace_url

    parsed_url = urlparse(workspace_url)
    port = parsed_url.port
    if G.DEBUG and parsed_url.scheme == 'http':
        # Only obey http if we're debugging
        if not port:
            port = 3148
        secure = False
    if not port:
        port = G.DEFAULT_PORT
    # Allow /file/...
    result = re.match(r'^/([-\@\+\.\w]+)/([-\.\w]+)/?.*$', parsed_url.path)
    if not result:
        # Old style URL. Do not remove. People still have these in their persistent.json
        result = re.match(r'^/r/([-\@\+\.\w]+)/([-\.\w]+)/?$', parsed_url.path)

    if result:
        (owner, workspace_name) = result.groups()
    else:
        raise ValueError('%s is not a valid Floobits URL' % workspace_url)
    return {
        'host': parsed_url.hostname,
        'owner': owner,
        'port': port,
        'workspace': workspace_name,
        'secure': secure,
    }
def to_workspace_url(r):
    """Build a workspace URL string from a parsed-URL dict *r*.

    Default ports (3448 for https, 3148 for http) are omitted; an optional
    ``path``/``line`` pair is appended as ``/file/<path>:<line>``.
    """
    port = int(r.get('port', 3448))
    secure = r['secure']
    proto = 'https' if secure else 'http'
    default_port = 3448 if secure else 3148
    port_part = '' if port == default_port else ':%s' % port
    host = r.get('host', G.DEFAULT_HOST)
    url = '%s://%s%s/%s/%s' % (proto, host, port_part, r['owner'], r['workspace'])
    p = r.get('path')
    if p:
        url += '/file/%s' % p
        line = r.get('line')
        if line:
            url += ':%s' % line
    return url
def normalize_url(workspace_url):
    """Return the canonical form of *workspace_url* (parse, then rebuild)."""
    return to_workspace_url(parse_url(workspace_url))
def get_full_path(p):
    """Return *p* joined onto G.PROJECT_PATH, normalized."""
    full_path = os.path.join(G.PROJECT_PATH, p)
    return unfuck_path(full_path)
def unfuck_path(p):
    """Normalize *p*, collapsing redundant separators and ``..`` segments."""
    cleaned = os.path.normpath(p)
    return cleaned
def to_rel_path(p):
    """Return *p* relative to the project root, using forward slashes."""
    return os.path.relpath(p, G.PROJECT_PATH).replace(os.sep, '/')
def to_scheme(secure):
    """Map a secure flag to a URL scheme ('https' only for the literal True)."""
    return 'https' if secure is True else 'http'
def is_shared(p):
    """Return True when path *p* lies inside the currently-joined workspace."""
    if not G.AGENT or not G.AGENT.joined_workspace:
        return False
    p = unfuck_path(p)
    try:
        # Paths that escape the project root relativize to '../...'.
        if to_rel_path(p).find('../') == 0:
            return False
    except ValueError:
        # os.path.relpath can raise ValueError (e.g. different drives on Windows).
        return False
    return True
def update_floo_file(path, data):
    """Merge *data* into the JSON file at *path*, best-effort.

    When the file is missing, unreadable, or not a JSON object, *data*
    replaces its contents entirely. Write failures are logged, not raised.
    """
    merged = data
    try:
        with open(path, 'r') as fd:
            existing = json.loads(fd.read())
        existing.update(data)
        merged = existing
    except Exception:
        # Unreadable/invalid existing file: fall back to writing data as-is.
        pass

    try:
        with open(path, 'w') as floo_fd:
            floo_fd.write(json.dumps(merged, indent=4, sort_keys=True, separators=(',', ': ')))
    except Exception as e:
        msg.warn('Couldn\'t update .floo file: ', merged, ': ', str_e(e))
def read_floo_file(path):
    """Read and parse the ``.floo`` file inside directory *path*.

    Returns an empty dict when the file is missing or unreadable; parse
    errors are logged and also yield an empty dict.
    """
    floo_file = os.path.join(path, '.floo')
    try:
        with open(floo_file, 'rb') as fd:
            return json.loads(fd.read().decode('utf-8'))
    except (IOError, OSError):
        return {}
    except Exception as e:
        msg.warn('Couldn\'t read .floo file: ', floo_file, ': ', str_e(e))
        return {}
def get_persistent_data(per_path=None):
    """Load persistent workspace data from *per_path* (default: G.BASE_DIR).

    Always returns a dict containing at least 'recent_workspaces' (list)
    and 'workspaces' (dict), even when the file is missing or corrupt.
    """
    fallback = {'recent_workspaces': [], 'workspaces': {}}
    if not per_path:
        per_path = os.path.join(G.BASE_DIR, 'persistent.json')
    try:
        per = open(per_path, 'rb')
    except (IOError, OSError):
        msg.debug('Failed to open ', per_path, '. Recent workspace list will be empty.')
        return fallback
    try:
        data = per.read().decode('utf-8')
        persistent_data = json.loads(data)
    except Exception as e:
        msg.debug('Failed to parse ', per_path, '. Recent workspace list will be empty.')
        msg.debug(str_e(e))
        msg.debug(data)
        return fallback
    persistent_data.setdefault('recent_workspaces', [])
    persistent_data.setdefault('workspaces', {})
    return persistent_data
def update_persistent_data(data):
    """Write *data* to persistent.json, de-duplicating recent workspaces by URL."""
    deduped = []
    seen_urls = set()
    for workspace in data['recent_workspaces']:
        try:
            url = workspace['url']
        except Exception as e:
            # Malformed entries are logged and dropped.
            msg.debug(str_e(e))
            continue
        if url in seen_urls:
            continue
        seen_urls.add(url)
        deduped.append(workspace)
    data['recent_workspaces'] = deduped
    per_path = os.path.join(G.BASE_DIR, 'persistent.json')
    with open(per_path, 'wb') as per:
        per.write(json.dumps(data, indent=2).encode('utf-8'))
def normalize_persistent_data():
    """Rewrite persistent.json with canonical workspace URLs and normalized paths."""
    persistent_data = get_persistent_data()
    for recent in persistent_data['recent_workspaces']:
        recent['url'] = normalize_url(recent['url'])

    for owner, workspaces in persistent_data['workspaces'].items():
        for name, workspace in workspaces.items():
            workspace['url'] = normalize_url(workspace['url'])
            workspace['path'] = unfuck_path(workspace['path'])
    update_persistent_data(persistent_data)
def add_workspace_to_persistent_json(owner, name, url, path):
    """Record workspace *owner*/*name* (its url and local path) in persistent.json."""
    d = get_persistent_data()
    d['workspaces'].setdefault(owner, {})[name] = {'url': url, 'path': path}
    update_persistent_data(d)
def update_recent_workspaces(workspace_url):
    """Push *workspace_url* onto the front of the recent-workspaces list.

    The list is truncated to 100 entries and de-duplicated (first
    occurrence wins) before being written back.
    """
    d = get_persistent_data()
    recents = d.get('recent_workspaces', [])
    recents.insert(0, {'url': workspace_url})
    recents = recents[:100]
    deduped = []
    seen = set()
    for entry in recents:
        key = json.dumps(entry)
        if key in seen:
            continue
        seen.add(key)
        deduped.append(entry)
    d['recent_workspaces'] = deduped
    update_persistent_data(d)
def get_workspace_by_path(path, _filter):
    """Find a known workspace whose local path equals *path*.

    Returns the first truthy value produced by calling *_filter* on a
    matching workspace's URL, or None when nothing matches.
    """
    path = unfuck_path(path)
    for owner, workspaces in get_persistent_data()['workspaces'].items():
        for name, workspace in workspaces.items():
            if unfuck_path(workspace['path']) != path:
                continue
            result = _filter(workspace['url'])
            if result:
                return result
def rm(path):
    """Remove *path*, then prune now-empty parent directories upward."""
    os.remove(path)
    parent = os.path.split(path)[0]
    try:
        os.removedirs(parent)
    except OSError:
        # A non-empty (or unremovable) ancestor stops the pruning; that's fine.
        pass
def mkdir(path):
    """Create *path* (and parents) if needed; an already-existing dir is fine."""
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return
        editor.error_message('Cannot create directory {0}.\n{1}'.format(path, str_e(e)))
        raise
def get_line_endings(path):
    """Sniff the line-ending style of the file at *path*.

    Returns '\\r\\n' or '\\n' based on the first line, or None when the file
    is unreadable, empty, or its first line has no newline.
    """
    try:
        with open(path, 'rb') as fd:
            line = fd.readline()
    except Exception:
        return
    if not line:
        return
    # Bug fix: the file is opened in binary mode, so the comparisons must be
    # against bytes. The old code compared bytes to str ("\r\n"), which is
    # always False on Python 3, so no line ending was ever detected.
    if line.endswith(b"\r\n"):
        return "\r\n"
    if line.endswith(b"\n"):
        return "\n"
def save_buf(buf):
    """Write a buffer dict to disk at its full local path.

    ``buf`` carries 'path' (workspace-relative), 'encoding', and 'buf'
    (text for utf8 buffers; presumably raw bytes otherwise -- confirm with
    callers). utf8 buffers are written using the file's existing line
    endings, falling back to the editor's preference.
    """
    path = get_full_path(buf['path'])
    mkdir(os.path.split(path)[0])
    if buf['encoding'] == 'utf8':
        # Preserve whatever EOL convention the on-disk file already uses.
        newline = get_line_endings(path) or editor.get_line_endings(path)
    try:
        with open(path, 'wb') as fd:
            if buf['encoding'] == 'utf8':
                out = buf['buf']
                if newline != '\n':
                    # Re-join '\n'-separated text with the target EOL.
                    out = out.split('\n')
                    out = newline.join(out)
                fd.write(out.encode('utf-8'))
            else:
                # Non-utf8 buffers are treated as opaque bytes.
                fd.write(buf['buf'])
    except Exception as e:
        msg.error('Error saving buf: ', str_e(e))
def _unwind_generator(gen_expr, cb=None, res=None):
    """Drive a generator-based async flow without nested callbacks.

    Repeatedly sends results back into *gen_expr*. When the generator
    yields a callable (optionally as the head of a tuple of arguments),
    that callable is invoked with a continuation appended as its final
    argument; the continuation re-enters this function with the async
    result. Unwinding ends on StopIteration or an explicit
    __StopUnwindingException raised via return_value().
    """
    try:
        while True:
            maybe_func = res
            args = []
            # if the first arg is callable, we need to call it (and assume the last argument is a callback)
            if type(res) == tuple:
                maybe_func = len(res) and res[0]
            if not callable(maybe_func):
                # send only accepts one argument... this is slightly dangerous if
                # we ever just return a tuple of one element
                # TODO: catch no generator
                if type(res) == tuple and len(res) == 1:
                    res = gen_expr.send(res[0])
                else:
                    res = gen_expr.send(res)
                continue
            def f(*args):
                # Continuation: feed the async callback's args back into the generator.
                return _unwind_generator(gen_expr, cb, args)
            try:
                args = list(res)[1:]
            except Exception:
                # assume not iterable
                args = []
            args.append(f)
            return maybe_func(*args)
    # TODO: probably shouldn't catch StopIteration to return since that can occur by accident...
    except StopIteration:
        pass
    except __StopUnwindingException as e:
        res = e.ret_val
    if cb:
        return cb(res)
    return res
class __StopUnwindingException(BaseException):
    # Control-flow exception used by return_value() to abort _unwind_generator
    # early with a result. Derives from BaseException, presumably so broad
    # ``except Exception`` handlers in user code don't swallow it.
    def __init__(self, ret_val):
        self.ret_val = ret_val  # value handed back to the unwinder
def return_value(args):
    """Terminate an inlined-callbacks generator early, making *args* its result."""
    raise __StopUnwindingException(args)
def inlined_callbacks(f):
    """ Branching logic in async functions generates a callback nightmare.
    Use this decorator to inline the results. If you yield a function, it must
    accept a callback as its final argument that it is responsible for firing.
    The decorated generator is driven to completion by _unwind_generator,
    which supplies the callbacks behind the scenes.
    """
    @wraps(f)
    def wrap(*args, **kwargs):
        return _unwind_generator(f(*args, **kwargs))
    return wrap
def has_browser():
    """Return True if the ``webbrowser`` module can locate any known browser."""
    candidates = (
        "MacOSX",  # Default mac browser.
        "Chrome",
        "Chromium",
        "Firefox",
        "Safari",
        "Opera",
        "windows-default",
    )
    for name in candidates:
        try:
            webbrowser.get(name)
        except Exception:
            continue
        return True
    return False
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/gui/kivy/uix/dialogs/choice_dialog.py | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.widget import Widget
Builder.load_string('''
<ChoiceDialog@Popup>
id: popup
title: ''
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Widget:
size_hint: 1, 0.1
ScrollView:
orientation: 'vertical'
size_hint: 1, 0.8
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 2
size_hint: 1, None
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(popup.value)
popup.dismiss()
''')
class ChoiceDialog(Factory.Popup):
    """Popup that lets the user pick a single value from *choices*.

    *choices* may be a list (each entry serving as both key and label) or a
    dict of key -> label. *key* is the initially selected value; *callback*
    is invoked with the chosen key when OK is pressed (wired in the kv rule).
    """
    def __init__(self, title, choices, key, callback):
        Factory.Popup.__init__(self)
        if type(choices) is list:
            # Promote a plain list to an identity mapping of key -> label.
            choices = dict(map(lambda x: (x,x), choices))
        layout = self.ids.choices
        # Let the grid grow with its content so the ScrollView can scroll it.
        layout.bind(minimum_height=layout.setter('height'))
        for k, v in sorted(choices.items()):
            l = Label(text=v)
            l.height = '48dp'
            l.size_hint_x = 4
            cb = CheckBox(group='choices')
            cb.value = k
            cb.height = '48dp'
            cb.size_hint_x = 1
            def f(cb, x):
                # ``cb`` here is the parameter filled in by the event dispatch
                # (the checkbox instance), not the loop variable, so the usual
                # late-binding closure pitfall does not apply.
                if x: self.value = cb.value
            cb.bind(active=f)
            if k == key:
                cb.active = True
            layout.add_widget(l)
            layout.add_widget(cb)
        layout.add_widget(Widget(size_hint_y=1))
        self.callback = callback
        self.title = title
        self.value = key
/CsuPMTD-1.0.27.tar.gz/CsuPMTD-1.0.27/PMTD/maskrcnn_benchmark/apex/apex/pyprof/prof/reduction.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Mean(OperatorLayerBase):
    """Profiler model for torch/Tensor ``mean`` calls: parses the argument
    marker and estimates data volume and flops."""
    def __init__(self, d):
        marker = eval(d.argMarker[0])
        mod = marker['mod']
        op = marker['op']
        args = marker['args']
        self.marker = marker
        self.mod_ = mod
        self.op_ = op
        self.args = args
        assert (mod in ["torch", "Tensor"])
        assert (op == "mean")
        #Filter out named parameters
        args = list(filter(lambda x : x['name'] == '', args))
        assert (len(args) <= 2)
        # The first positional argument is the tensor being reduced.
        i = args[0]
        self.shape = i['shape']
        self.type = i['dtype']
        # Bookkeeping fields copied from the profiler record.
        self.dir = d.dir
        self.sub = d.sub
    def params(self):
        """Shape/type summary used for reporting."""
        p = OrderedDict([('T', self.shape), ('type', self.type)])
        return p
    def tc(self):
        # Tensor cores are not applicable to this op.
        return "-"
    def op(self):
        return self.op_
    def mod(self):
        return self.mod_
    def elems(self):
        return Utility.numElems(self.shape)
    def bytes(self):
        # Only the primary sub-call (sub == 0) is counted.
        if self.sub == 0:
            return self.elems() * Utility.typeToBytes(self.type)
        else:
            return 0
    def flops(self):
        # n additions plus one divide; only for the primary sub-call.
        if self.sub == 0:
            return self.elems() + 1
        else:
            return 0
class Sum(OperatorLayerBase):
    """Profiler model for torch/Tensor ``sum`` calls."""

    def __init__(self, d):
        marker = eval(d.argMarker[0])
        mod = marker['mod']
        op = marker['op']
        args = marker['args']
        self.marker = marker
        self.mod_ = mod
        self.op_ = op
        self.args = args
        assert (mod in ["torch", "Tensor"])
        assert (op == "sum")
        assert (len(args) >= 1)
        # The reduced tensor is either the sole positional argument or the
        # one explicitly named "input".
        if args[0]['name'] == "":
            tensor_arg = args[0]
        else:
            tensor_arg = [a for a in args if a['name'] == "input"][0]
        self.shape = tensor_arg['shape']
        self.type = tensor_arg['dtype']

    def params(self):
        """Shape/type summary used for reporting."""
        return OrderedDict([('T', self.shape), ('type', self.type)])

    def tc(self):
        # Tensor cores are not applicable to this op.
        return "-"

    def op(self):
        return self.op_

    def mod(self):
        return self.mod_

    def elems(self):
        return Utility.numElems(self.shape)

    def flops(self):
        # Note: This is incorrect, need to calculate actual flops (say via nvprof)
        return self.elems()

    def bytes(self):
        return self.elems() * Utility.typeToBytes(self.type)
class Norm(OperatorLayerBase):
    """Profiler model for torch/Tensor ``norm`` calls."""
    def __init__(self, d):
        marker = eval(d.argMarker[0])
        mod = marker['mod']
        op = marker['op']
        args = marker['args']
        self.marker = marker
        self.mod_ = mod
        self.op_ = op
        self.args = args
        assert (mod in ["torch", "Tensor"])
        assert (op == "norm")
        #assert (len(args) == 1)
        # The tensor being reduced is taken to be the first argument.
        i = args[0]
        self.shape = i['shape']
        self.type = i['dtype']
    def params(self):
        """Shape/type summary used for reporting."""
        p = OrderedDict([('T', self.shape), ('type', self.type)])
        return p
    def elems(self):
        return Utility.numElems(self.shape)
    def bytes(self):
        return self.elems() * Utility.typeToBytes(self.type)
    def flops(self):
        # square and add plus sqrt
        return 2 * self.elems() + 1
    def tc(self):
        # Tensor cores are not applicable to this op.
        return "-"
    def op(self):
        return self.op_
    def mod(self):
        return self.mod_
/ChronoNLP-3.0.2-py3-none-any.whl/Chrono/TimePhraseToChrono/Season.py | import re
import string
import Chrono.ChronoUtils.parse_text
from Chrono import chronoEntities as chrono
from Chrono.TimePhraseToChrono.Modifier import hasNextLastThis
from Chrono.ChronoUtils.parse_text import calculateSpan
from Chrono.config import DICTIONARY
## Parses a TimePhrase entity's text field to determine if it contains a season of the year written out in text form, then builds the associated chronoentity list
# @author Amy Olex
# @param s The TimePhrase entity to parse
# @param chronoID The current chronoID to increment as new chronoentities are added to list.
# @param chronoList The list of chrono objects we currently have. Will add to these.
# @return chronoList, chronoID Returns the expanded chronoList and the incremented chronoID.
def buildSeasonOfYear(s, chrono_id, chrono_list, ref_list):
    boo, val, idxstart, idxend = hasSeasonOfYear(s, ref_list)
    if boo:
        # Convert phrase-relative indices into absolute document spans.
        ref_Sspan, ref_Espan = s.getSpan()
        abs_Sspan = ref_Sspan + idxstart
        abs_Espan = ref_Sspan + idxend
        my_entity = chrono.ChronoSeasonOfYearEntity(entityID=str(chrono_id) + "entity", start_span=abs_Sspan, end_span=abs_Espan, season_type=val)
        chrono_id = chrono_id + 1
        #check here to see if it has a modifier
        hasMod, mod_type, mod_start, mod_end = hasNextLastThis(s)
        if(hasMod):
            if mod_type == "This":
                chrono_list.append(chrono.ChronoThisOperator(entityID=str(chrono_id) + "entity", start_span=abs_Sspan, end_span=abs_Espan, repeating_interval=my_entity.get_id()))
                chrono_id = chrono_id + 1
            if mod_type == "Next":
                chrono_list.append(chrono.ChronoNextOperator(entityID=str(chrono_id) + "entity", start_span=abs_Sspan, end_span=abs_Espan, repeating_interval=my_entity.get_id()))
                chrono_id = chrono_id + 1
            if mod_type == "Last":
                chrono_list.append(chrono.ChronoLastOperator(entityID=str(chrono_id) + "entity", start_span=abs_Sspan, end_span=abs_Espan, repeating_interval=my_entity.get_id()))
                chrono_id = chrono_id + 1
        #else:
        # chrono_list.append(chrono.ChronoLastOperator(entityID=str(chrono_id) + "entity", start_span=abs_Sspan, end_span=abs_Espan, repeating_interval=my_entity.get_id()))
        # chrono_id = chrono_id + 1
        # else:
        # chrono_list.append(chrono.ChronoLastOperator(entityID=str(chrono_id) + "entity", start_span=abs_Sspan, end_span=abs_Espan, repeating_interval=my_entity.get_id()))
        # chrono_id = chrono_id+1
        #check to see if it has a number associated with it. We assume the number comes before the interval string
        if idxstart > 0:
            substr = s.getText()[0:idxstart]
            # First look for a numeric (digit) value before the season term...
            m = re.search('([0-9]{1,2})', substr)
            if m is not None :
                num_val = m.group(0)
                abs_Sspan = ref_Sspan + m.span(0)[0]
                abs_Espan = ref_Sspan + m.span(0)[1]
                my_number_entity = chrono.ChronoNumber(entityID=str(chrono_id) + "entity", start_span=abs_Sspan, end_span=abs_Espan, value=num_val)
                chrono_id = chrono_id + 1
                #add the number entity to the list
                chrono_list.append(my_number_entity)
                my_entity.set_number(my_number_entity.get_id())
            #else search for a text number
            else:
                texNumVal = Chrono.ChronoUtils.parse_text.getNumberFromText(substr)
                if texNumVal is not None:
                    #create the number entity
                    my_number_entity = chrono.ChronoNumber(entityID=str(chrono_id) + "entity", start_span=ref_Sspan, end_span=ref_Sspan + (idxstart - 1), value=texNumVal)
                    chrono_id = chrono_id + 1
                    #append to list
                    chrono_list.append(my_number_entity)
                    #link to interval entity
                    my_entity.set_number(my_number_entity.get_id())
        chrono_list.append(my_entity)
    return chrono_list, chrono_id
## Takes in a TimePhrase entity and identifies if it has any season terms, like "summer" or "fall"
# @author Amy Olex
# @param tpentity The TimePhrase entity object being parsed
# @param ref_list The list of reference tokens (used for POS lookup)
# @return Outputs 4 values: Boolean Flag, Value text, start index, end index
def hasSeasonOfYear(tpentity, ref_list):
    # Canonical season labels keyed by their singular token form. This table
    # replaces four duplicated if/elif branches that differed only in the
    # season name; a dead initial calculateSpan() call was also removed.
    SEASONS = {"summer": "Summer", "winter": "Winter",
               "fall": "Fall", "spring": "Spring"}

    refStart_span, refEnd_span = tpentity.getSpan()
    # Lower-case, strip punctuation, and tokenize the phrase text.
    text = tpentity.getText().lower()
    text_norm = text.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation))).strip()
    text_list = text_norm.split(" ")

    # Tokens that are also season terms in the dictionary.
    intersect = list(set(text_list) & set(DICTIONARY["Season"]))

    # Only a single season mention is supported; zero or multiple matches fail.
    if len(intersect) == 1:
        term = intersect[0]
        # Reduce a plural form ("summers") to its singular for lookup/span.
        singular = term[:-1] if term.endswith("s") else term
        if singular in SEASONS:
            start_idx, end_idx = calculateSpan(text_norm, singular)
            absStart = refStart_span + start_idx
            absEnd = refStart_span + end_idx
            postag = ref_list[Chrono.ChronoUtils.parse_text.getRefIdx(ref_list, absStart, absEnd)].getPos()
            # Accept the mention only when it is tagged as a noun.
            if postag == "NN":
                return True, SEASONS[singular], start_idx, end_idx
    return False, None, None, None
/HibiAPI-0.8.0-py3-none-any.whl/hibiapi/api/tieba/api.py | import hashlib
from enum import Enum
from random import randint
from typing import Any, Dict, Optional
from hibiapi.utils.config import APIConfig
from hibiapi.utils.net import catch_network_error
from hibiapi.utils.routing import BaseEndpoint, dont_route
Config = APIConfig("tieba")
class TiebaSignUtils:
salt = b"tiebaclient!!!"
@staticmethod
def random_digit(length: int) -> str:
return "".join(map(str, [randint(0, 9) for _ in range(length)]))
@staticmethod
def construct_content(params: Dict[str, Any]) -> bytes:
# NOTE: this function used to construct form content WITHOUT urlencode
# Don't ask me why this is necessary, ask Tieba's programmers instead
return b"&".join(
map(
lambda k, v: (
k.encode()
+ b"="
+ str(v.value if isinstance(v, Enum) else v).encode()
),
params.keys(),
params.values(),
)
)
@classmethod
def sign(cls, params: Dict[str, Any]) -> bytes:
params.update(
{
"_client_id": (
"wappc_" + cls.random_digit(13) + "_" + cls.random_digit(3)
),
"_client_type": 2,
"_client_version": "9.9.8.32",
**{
k.upper(): str(v).strip()
for k, v in Config["net"]["params"].as_dict().items()
if v
},
}
)
params = {k: params[k] for k in sorted(params.keys())}
params["sign"] = (
hashlib.md5(cls.construct_content(params).replace(b"&", b"") + cls.salt)
.hexdigest()
.upper()
)
return cls.construct_content(params)
class TiebaEndpoint(BaseEndpoint):
    """Endpoints of Tieba's mobile client API (form-encoded, MD5-signed POSTs)."""
    base = "http://c.tieba.baidu.com"
    @dont_route
    @catch_network_error
    async def request(
        self, endpoint: str, *, params: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """POST signed *params* to *endpoint* and return the decoded JSON."""
        response = await self.client.post(
            url=self._join(self.base, endpoint, {}),
            content=TiebaSignUtils.sign(params or {}),
        )
        response.raise_for_status()
        return response.json()
    async def post_list(self, *, name: str, page: int = 1, size: int = 50):
        """List threads of forum *name*, paginated (page *page*, *size* per page)."""
        return await self.request(
            "c/f/frs/page",
            params={
                "kw": name,
                "pn": page,
                "rn": size,
            },
        )
    async def post_detail(
        self,
        *,
        tid: int,
        page: int = 1,
        size: int = 50,
        reversed: bool = False,
    ):
        """Fetch posts of thread *tid*; *reversed* asks for newest-first order."""
        return await self.request(
            "c/f/pb/page",
            params={
                # The "last"/"r" flags request reverse chronological order.
                **({"last": 1, "r": 1} if reversed else {}),
                "kz": tid,
                "pn": page,
                "rn": size,
            },
        )
    async def subpost_detail(
        self,
        *,
        tid: int,
        pid: int,
        page: int = 1,
        size: int = 50,
    ):
        """Fetch the floor (sub-post) list of post *pid* in thread *tid*."""
        return await self.request(
            "c/f/pb/floor",
            params={
                "kz": tid,
                "pid": pid,
                "pn": page,
                "rn": size,
            },
        )
    async def user_profile(self, *, uid: int):
        """Fetch the profile of user *uid*, including post count and post list."""
        return await self.request(
            "c/u/user/profile",
            params={
                "uid": uid,
                "need_post_count": 1,
                "has_plist": 1,
            },
        )
    async def user_subscribed(
        self, *, uid: int, page: int = 1
    ):  # XXX This API required user login!
        """Fetch the forums user *uid* follows (login required on Tieba's side)."""
        return await self.request(
            "c/f/forum/like",
            params={
                "is_guest": 0,
                "uid": uid,
                "page_no": page,
            },
        )
// A single page-number (or "...") button. Clicking a number pages to it;
// clicking an ellipsis jumps forward/back via the supplied handlers.
var Pagebutton = React.createClass({displayName: "Pagebutton",
    pager: function() {
        if (this.props.clicky) {
            if (this.props.className.includes('ellipses')) {
                // An ellipsis button carries exactly one of forward/back.
                if (this.props.forward) this.props.forward();
                if (this.props.back) this.props.back();
            } else {
                this.props.clicky(this.props.children)
            }
        }
    },
    render: function() {
        var classes='page-button';
        // Highlight the button matching the currently visible page.
        if (this.props.page == this.props.children) classes+= ' active-page';
        return React.createElement("span", {className: classes, onMouseDown: this.pager}, this.props.children)
    }
});
// The row of page buttons: first/last pages always shown, a sliding window
// of neighbours around the current page, and "..." jump buttons.
// Fixes: added React `key` props to all list children, extracted the heavily
// duplicated push logic into helpers, and corrected the page==4 case whose
// trailing ellipsis (between the window and the last page) jumped *backwards*.
var Pagebar = React.createClass({displayName: "Pagebar",
    // Append one numbered page button per value in [from, to] (inclusive).
    pushRange: function(items, from, to) {
        for (var i = from; i <= to; i++) {
            items.push(React.createElement(Pagebutton, {key: i, className: "page-button", clicky: this.props.clicky, page: this.props.page}, i));
        }
    },
    // Append a "..." button that jumps three pages backwards.
    pushBack: function(items) {
        items.push(React.createElement(Pagebutton, {key: "ell-back", className: "page-button ellipses", back: this.props.back, clicky: this.props.clicky}, "..."));
    },
    // Append a "..." button that jumps three pages forwards.
    pushForward: function(items) {
        items.push(React.createElement(Pagebutton, {key: "ell-fwd", className: "page-button ellipses", forward: this.props.forward, clicky: this.props.clicky}, "..."));
    },
    render: function() {
        var items = [];
        var pages = this.props.pages;
        var page = this.props.page;
        this.pushRange(items, 1, 1);
        if (pages > 1) {
            if (pages <= 5) {
                this.pushRange(items, 2, pages - 1);
            } else if (page < 4) {
                this.pushRange(items, 2, 4);
                this.pushForward(items);
            } else if (page >= pages - 2) {
                this.pushBack(items);
                this.pushRange(items, pages - 3, pages - 1);
            } else if (page == 4) {
                this.pushRange(items, 2, 5);
                // Was pushBack before -- a trailing ellipsis should jump forward.
                this.pushForward(items);
            } else if (page == pages - 3) {
                this.pushBack(items);
                this.pushRange(items, pages - 4, pages - 1);
            } else {
                this.pushBack(items);
                this.pushRange(items, page - 2, page + 2);
                this.pushForward(items);
            }
            this.pushRange(items, pages, pages);
        }
        return React.createElement("div", {className: "pagebar-wrap"}, items);
    }
});
// Renders the slice of `data` belonging to the current page.
// Fix: list children now carry a React `key` prop (the item index).
var Page = React.createClass({displayName: "Page",
    render: function() {
        // First index (inclusive) and end index (exclusive) of this page's items.
        var startPoint = ((this.props.page-1) * this.props.perPage);
        var endPoint = startPoint + this.props.perPage;
        var items = [];
        for (var i = startPoint; i < endPoint; i++) {
            // The final page may be partially filled; skip the missing tail.
            if (typeof this.props.data[i] !== 'undefined') items.push(React.createElement("div", {key: i, className: "pagination-item"}, this.props.data[i]));
        }
        return (
            React.createElement("div", {className: "lk-pagination-content"},
                items
            )
        );
    }
});
// Top-level paginated list with an optional search box.
// Fixes: removed a leftover console.log and dead commented-out code, parse
// the `data` prop once instead of three times, and lowercase the search terms
// (items were lowercased but terms were not, so uppercase input never matched).
var Pagination = React.createClass({displayName: "Pagination",
    getInitialState: function() {
        var data = JSON.parse(this.props.data);
        return {
            page: 1,                                            // current page (1-based)
            pages: Math.ceil(data.length/this.props.perPage),   // total page count
            data: data,                                         // full item list
            parsedData: data,                                   // items surviving the search filter
            searchable: JSON.parse(this.props.searchable.toLowerCase()),  // "True"/"False" prop -> bool
            search: ''
        };
    },
    changePage: function(num) {
        this.setState({page: parseInt(num)});
    },
    jumpBack: function() {
        this.setState({page: this.state.page - 3});
    },
    jumpForward: function() {
        this.setState({page: this.state.page + 3});
    },
    // Keep an item if every whitespace-separated search term appears in it.
    change: function(e) {
        this.setState({search: e.target.value.toLowerCase()});
        var terms = e.target.value.toLowerCase().split(' ');
        var matches = [];
        for (var i = 0; i < this.state.data.length; i++) {
            var item = this.state.data[i];
            var hits = 0;
            for (var j = 0; j < terms.length; j++) {
                if (terms[j] == '' || item.toLowerCase().includes(terms[j])) hits++;
            }
            if (hits == terms.length) matches.push(item);
        }
        this.setState({
            parsedData: matches,
            page: 1,
            pages: Math.ceil(matches.length/this.props.perPage)
        });
    },
    render: function() {
        var search = this.state.searchable ? (React.createElement("input", {type: "text", onChange: this.change})) : '';
        return (
            React.createElement("div", {className: "lk-pagination"},
                search,
                React.createElement(Pagebar, {page: this.state.page, pages: this.state.pages, clicky: this.changePage, back: this.jumpBack, forward: this.jumpForward}),
                React.createElement(Page, {page: this.state.page, data: this.state.parsedData, pages: this.state.pages, perPage: parseInt(this.props.perPage)})
            )
        );
    }
});
/FlexGet-3.9.6-py3-none-any.whl/flexget/components/variables/variables.py | import codecs
import os
from datetime import datetime
import yaml
from jinja2 import TemplateError
from jinja2.nativetypes import NativeEnvironment
from loguru import logger
from sqlalchemy import Column
from sqlalchemy.sql.sqltypes import DateTime, Integer, Unicode
from flexget import db_schema
from flexget.config_schema import register_config_key
from flexget.event import event
from flexget.manager import Session
from flexget.plugin import PluginError
from flexget.utils.database import json_synonym
logger = logger.bind(name='variables')  # plugin-scoped logger
# Bump DB_VERSION whenever the schema below changes; versioned_base migrates.
DB_VERSION = 0
Base = db_schema.versioned_base('variables', DB_VERSION)
class Variables(Base):
    """Singleton table row persisting the user's variables dict as JSON."""
    __tablename__ = 'variables'
    id = Column(Integer, primary_key=True)
    _variables = Column('variables', Unicode)  # raw JSON text
    variables = json_synonym('_variables')  # dict view over _variables
    added = Column(DateTime, default=datetime.now)
def variables_from_file(config_base, filename):
    """Load the variables mapping from a YAML file under *config_base*.

    Raises PluginError when the file is missing or not valid YAML.
    Returns {} for an empty file.
    """
    variables_file = os.path.join(config_base, filename)
    if not os.path.exists(variables_file):
        raise PluginError('File %s does not exist!' % variables_file)
    try:
        # Text-mode open with an explicit encoding replaces the legacy
        # codecs.open(..., 'rb', 'utf-8') binary/codec combination.
        with open(variables_file, encoding='utf-8') as f:
            variables_dict = yaml.safe_load(f.read())
    except yaml.YAMLError as e:
        raise PluginError('Invalid variables file: %s' % e)
    return variables_dict or {}
def variables_from_db():
    """Return the stored variables dict, or {} when none has been saved."""
    with Session() as session:
        stored = session.query(Variables).first()
        return stored.variables if stored else {}
def variables_to_db(variables_dict):
    """Persist *variables_dict*, creating the singleton row if needed."""
    with Session() as session:
        stored = session.query(Variables).first() or Variables()
        stored.variables = variables_dict
        session.merge(stored)
@event('manager.before_config_validate')
def process_variables(config, manager):
    """Render all string elements of the config against defined variables."""
    # Jinja block syntax is effectively disabled; only {? ... ?} variable
    # tags are honoured, so ordinary Jinja templates elsewhere survive.
    env_params = {
        'block_start_string': '^^disabled^^',
        'block_end_string': '^^disabled^^',
        'variable_start_string': '{?',
        'variable_end_string': '?}',
    }
    if 'variables' not in config or config.get('variables') is False:
        return
    env = NativeEnvironment(**env_params)
    # `variables: true` -> load from DB; a dict -> inline; a string -> file
    # path (which is then mirrored into the DB).
    if isinstance(config['variables'], bool):
        logger.debug('trying to load variables from DB')
        variables = variables_from_db()
    elif isinstance(config['variables'], dict):
        logger.debug('loading variables from config')
        variables = config['variables']
    else:
        logger.debug('trying to load variables from file')
        variables = variables_from_file(manager.config_base, config['variables'])
        logger.debug('updating DB with variable file contents')
        variables_to_db(variables)
    env.globals = variables
    _process(config, env)
    return config
# Sentinel distinguishing "leave the element as-is" from a rendered value.
# The previous implementation returned None and callers tested `if val:`,
# which silently discarded legitimately falsy render results ('', 0, False).
_UNCHANGED = object()


def _process(element, environment):
    """Recursively render {? ... ?} tags in *element* in place.

    Dicts and lists are mutated; for strings, the rendered value (or
    _UNCHANGED) is returned to the caller for substitution. Render errors
    leave the original string untouched.
    """
    if isinstance(element, dict):
        for k, v in list(element.items()):
            new_key = _process(k, environment)
            if new_key is not _UNCHANGED:
                element[new_key] = element.pop(k)
                k = new_key
            val = _process(v, environment)
            if val is not _UNCHANGED:
                element[k] = val
    elif isinstance(element, list):
        for i, v in enumerate(element):
            val = _process(v, environment)
            if val is not _UNCHANGED:
                element[i] = val
    elif isinstance(element, str) and '{?' in element:
        try:
            return environment.from_string(element).render()
        except (TemplateError, TypeError):
            return _UNCHANGED
    return _UNCHANGED
# Accepts an inline mapping, a filename string, or a boolean (load from DB).
variables_config_schema = {'type': ['string', 'boolean', 'object']}
@event('config.register')
def register_config():
    # Expose the top-level `variables` config key to FlexGet's validator.
    register_config_key('variables', variables_config_schema)
/KicadModTree-1.1.2.tar.gz/KicadModTree-1.1.2/README.md | This repository contains scripts to generate custom KiCAD footprints using python, and a framework which allows us to
create custom KiCAD footprint. A big bunch of footprints of the KiCad library was developed using this framework.
# KicadModTree
**Licence:** GNU GPLv3+
**Maintainer:** Thomas Pointhuber
[](https://travis-ci.org/pointhi/kicad-footprint-generator)
[](https://codeclimate.com/github/pointhi/kicad-footprint-generator)
[](http://kicad-footprint-generator.readthedocs.io/en/latest/?badge=latest)
**Supports:** Python 2.7 and 3.3+
## About
I started drawing a bunch of similar footprints for KiCAD, like connectors which are mainly one base shape, and different
amount of pins. To be able to update/improve those footprints quickly I decided to write my own footprint generator Framework,
to allow simple creation of easy as well complex shapes.
This is my second approach (the first one can be found in the git history). This solution should be able to be easy to
use, to read and also be easy to expand with custom nodes.
## Overview
This framework is mainly based on the idea of scripted CAD systems (for example OpenSCAD). This means everything is a node, and nodes can be structured as a tree. In other words, you can group parts of the footprint and translate them in any way you want. Operations such as cloning are also straightforward because of this concept.
To be able to create custom Nodes, I separated the system in two parts. Base nodes, which represents simple structures
and also be used by KiCAD itself, and specialized nodes which alter the behaviour of base nodes (for example positioning),
or represent a specialized usage of base nodes (for example RectLine).
When you serialize your footprint, the serialize command only has to handle base nodes, because all other nodes are based
upon the base nodes. This allows us to write specialized nodes without worrying about the FileHandlers or other core systems.
You simply create your special node, and the framework knows how to handle it seamlessly.
Please look into the **[Documentation](http://kicad-footprint-generator.readthedocs.io/en/latest/)** for further details
```
KicadModTree - The KicadModTree framework which is used for footprint generation
docs - Files required to generate a sphinx documentation
scripts - scripts which are generating footprints based on this library
```
## Development
### Install development Dependencies
```sh
manage.sh update_dev_packages
```
### run tests
```sh
manage.sh tests
```
## Example Script
```python
from KicadModTree import *
footprint_name = "example_footprint"
# init kicad footprint
kicad_mod = Footprint(footprint_name)
kicad_mod.setDescription("A example footprint")
kicad_mod.setTags("example")
# set general values
kicad_mod.append(Text(type='reference', text='REF**', at=[0, -3], layer='F.SilkS'))
kicad_mod.append(Text(type='value', text=footprint_name, at=[1.5, 3], layer='F.Fab'))
# create silscreen
kicad_mod.append(RectLine(start=[-2, -2], end=[5, 2], layer='F.SilkS'))
# create courtyard
kicad_mod.append(RectLine(start=[-2.25, -2.25], end=[5.25, 2.25], layer='F.CrtYd'))
# create pads
kicad_mod.append(Pad(number=1, type=Pad.TYPE_THT, shape=Pad.SHAPE_RECT,
at=[0, 0], size=[2, 2], drill=1.2, layers=Pad.LAYERS_THT))
kicad_mod.append(Pad(number=2, type=Pad.TYPE_THT, shape=Pad.SHAPE_CIRCLE,
at=[3, 0], size=[2, 2], drill=1.2, layers=Pad.LAYERS_THT))
# add model
kicad_mod.append(Model(filename="example.3dshapes/example_footprint.wrl",
at=[0, 0, 0], scale=[1, 1, 1], rotate=[0, 0, 0]))
# output kicad model
file_handler = KicadFileHandler(kicad_mod)
file_handler.writeFile('example_footprint.kicad_mod')
```
## Usage Steps
1. Navigate into the `scripts` directory, and look for the type of footprint you would like to generate. For example, if you wish to generate an SMD inductor footprint, `cd` into `scripts/Inductor_SMD`.
2. Open the \*.yaml (or \*.yml) file in a text editor. Study a few of the existing footprint definitions to get an idea of how your new footprint entry should be structured.
3. Add your new footprint by inserting your own new section in the file. An easy way to do this is by simply copying an existing footprint definition, and modifying it to suit your part. Note: You may have to add or remove additional parameters that are not listed.
4. Save your edits and close the text editor.
5. Run the python script, passing the \*.yaml or (\*.yml) file as a parameter, e.g. `python3 Inductor_SMD.py Inductor_SMD.yml`. This will generate the \*.kicad_mod files for each footprint defined in the \*.yaml (or \*.yml).
| PypiClean |
/BlueWhale3-Educational-0.4.1.tar.gz/BlueWhale3-Educational-0.4.1/orangecontrib/educational/widgets/utils/kmeans.py | from functools import wraps
from types import FunctionType
from typing import NamedTuple, List
import numpy as np
import Orange
# One snapshot of the clustering state: the step that produced it, plus
# copies of the centroids and cluster assignments at that moment.
HistoryEntry = NamedTuple("HistoryEntry", (("step", FunctionType),
                                           ("centroids", np.ndarray),
                                           ("clusters", np.ndarray)))
def historic(reassign=True):
    """Decorator for Kmeans steps: snapshot state before running the step.

    Usable both bare (``@historic``) and parameterized
    (``@historic(reassign=False)``): when applied bare, *reassign* receives
    the decorated function itself, and the final line dispatches on whether
    it got a bool or a function. A truthy *reassign* (including the bare
    case) also recomputes cluster membership after the step.
    """
    def decorator(f):
        @wraps(f)
        def historian(self, *args, **kwargs):
            # store decorated function (not `f`) to enable comparisons with
            # Kmeans.<whatever>
            self._store_history(historian)
            f(self, *args, **kwargs)
            if reassign:
                self.clusters = self._find_clusters()
        return historian
    return decorator if isinstance(reassign, bool) else decorator(reassign)
class Kmeans:
    """
    Interactive K-Means with a full step history (supports stepping back).

    Parameters
    ----------
    data (np.ndarray): two-column data used for k-means
    centroids (int, list or numpy.ndarray): number of centroids, or their
        explicit coordinates
    """
    def __init__(self, data, centroids=3):
        self.data = None
        self.centroids = None
        self.history: List[HistoryEntry] = []
        self.clusters = None
        self.reset(data, centroids)
    @property
    def k(self):
        """Number of centroids."""
        return len(self.centroids)
    @property
    def waits_reassignment(self):
        """True when centroids moved last, so a membership assignment is due."""
        return self.history and self.history[-1].step == Kmeans.move_centroids
    @property
    def converged(self):
        """
        Clustering converged if the last three steps were assignment, moving,
        and assignment, with membership assignments being the same.
        """
        if len(self.history) < 3:
            return False
        a, b, c = self.history[-3:]
        return a.step == c.step == Kmeans.assign_membership \
            and b.step == Kmeans.move_centroids \
            and np.all(a.clusters == c.clusters)
    def _find_clusters(self):
        # Assign each point to its nearest centroid (Euclidean distance).
        dist = Orange.distance.Euclidean(self.data, self.centroids)
        return np.argmin(dist, axis=1)
    def _store_history(self, step):
        # Snapshot copies so later in-place mutation can't corrupt history.
        self.history.append(
            HistoryEntry(step, np.copy(self.centroids), np.copy(self.clusters)))
    def step_back(self):
        """Undo the most recent step, restoring centroids and clusters."""
        if self.history:
            _, self.centroids, self.clusters = self.history.pop()
    def reset(self, data, centroids=3):
        """Start over with new data and centroids; clears the history."""
        self.data = data
        self.centroids = np.array(
            centroids if not isinstance(centroids, int)
            else [self._random_position() for _ in range(centroids)])
        self.clusters = self._find_clusters()
        self.history = []
    @historic
    def assign_membership(self):
        # The @historic decorator records state and reassigns clusters.
        pass
    @historic(reassign=False)
    def move_centroids(self):
        """Move every centroid to the mean of its members (random if empty)."""
        for i in range(self.k):
            points = self.data[self.clusters == i]
            self.centroids[i, :] = np.average(points, axis=0) if points.size \
                else self._random_position()
    @historic
    def add_centroid(self, x=None, y=None):
        """Append a centroid at (x, y), or at a random position if omitted."""
        assert (x is None) == (y is None)
        new = self._random_position() if x is None else np.array([x, y])
        self.centroids = np.vstack((self.centroids, new))
    @historic
    def delete_centroid(self, num):
        """Remove the centroid at index *num*."""
        self.centroids = np.vstack((self.centroids[:num],
                                    self.centroids[num + 1:]))
    @historic
    def move_centroid(self, _index, x, y):
        """Place the centroid at *_index* exactly at (x, y)."""
        self.centroids[_index, :] = np.array([x, y])
    def _random_position(self):
        # Mean of a small random sample of the data, which keeps newly
        # created centroids inside the data's region.
        n = len(self.data)
        sample = np.random.choice(n, np.min((5, n)))
        return np.mean(self.data[sample], axis=0)
// Type declarations for the Node.js WASI (WebAssembly System Interface) module.
declare module 'wasi' {
    // Options accepted by the WASI constructor; all fields are optional.
    interface WASIOptions {
        /**
         * An array of strings that the WebAssembly application will
         * see as command line arguments. The first argument is the virtual path to the
         * WASI command itself.
         */
        args?: string[] | undefined;
        /**
         * An object similar to `process.env` that the WebAssembly
         * application will see as its environment.
         */
        env?: object | undefined;
        /**
         * This object represents the WebAssembly application's
         * sandbox directory structure. The string keys of `preopens` are treated as
         * directories within the sandbox. The corresponding values in `preopens` are
         * the real paths to those directories on the host machine.
         */
        preopens?: NodeJS.Dict<string> | undefined;
        /**
         * By default, WASI applications terminate the Node.js
         * process via the `__wasi_proc_exit()` function. Setting this option to `true`
         * causes `wasi.start()` to return the exit code rather than terminate the
         * process.
         * @default false
         */
        returnOnExit?: boolean | undefined;
        /**
         * The file descriptor used as standard input in the WebAssembly application.
         * @default 0
         */
        stdin?: number | undefined;
        /**
         * The file descriptor used as standard output in the WebAssembly application.
         * @default 1
         */
        stdout?: number | undefined;
        /**
         * The file descriptor used as standard error in the WebAssembly application.
         * @default 2
         */
        stderr?: number | undefined;
    }
    /**
     * The `WASI` class provides the WASI system call API and additional convenience
     * methods for working with WASI-based applications. Each `WASI` instance
     * represents a distinct sandbox environment. For security purposes, each `WASI`instance must have its command-line arguments, environment variables, and
     * sandbox directory structure configured explicitly.
     * @since v13.3.0, v12.16.0
     */
    class WASI {
        constructor(options?: WASIOptions);
        /**
         * Attempt to begin execution of `instance` as a WASI command by invoking its`_start()` export. If `instance` does not contain a `_start()` export, or if`instance` contains an `_initialize()`
         * export, then an exception is thrown.
         *
         * `start()` requires that `instance` exports a [`WebAssembly.Memory`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Memory) named`memory`. If
         * `instance` does not have a `memory` export an exception is thrown.
         *
         * If `start()` is called more than once, an exception is thrown.
         * @since v13.3.0, v12.16.0
         */
        start(instance: object): void; // TODO: avoid DOM dependency until WASM moved to own lib.
        /**
         * Attempt to initialize `instance` as a WASI reactor by invoking its`_initialize()` export, if it is present. If `instance` contains a `_start()`export, then an exception is thrown.
         *
         * `initialize()` requires that `instance` exports a [`WebAssembly.Memory`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Memory) named`memory`.
         * If `instance` does not have a `memory` export an exception is thrown.
         *
         * If `initialize()` is called more than once, an exception is thrown.
         * @since v14.6.0, v12.19.0
         */
        initialize(instance: object): void; // TODO: avoid DOM dependency until WASM moved to own lib.
        /**
         * `wasiImport` is an object that implements the WASI system call API. This object
         * should be passed as the `wasi_snapshot_preview1` import during the instantiation
         * of a [`WebAssembly.Instance`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Instance).
         * @since v13.3.0, v12.16.0
         */
        readonly wasiImport: NodeJS.Dict<any>; // TODO: Narrow to DOM types
    }
}
// The same API is exposed under the 'node:'-prefixed module specifier.
declare module 'node:wasi' {
    export * from 'wasi';
}
/OBITools-1.2.13.tar.gz/OBITools-1.2.13/distutils.ext/obidistutils/serenity/pip/_vendor/requests/packages/urllib3/packages/six.py |
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"  # Revision 41c74fef2ded

# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3

# Version-neutral aliases for the basic type categories and the platform's
# maximum container size.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe it: len() overflows on 32-bit Py_ssize_t.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
    """Add documentation to a function."""
    setattr(func, "__doc__", doc)
def _import_module(name):
    """Import module, returning the module after the last dot."""
    __import__(name)
    module = sys.modules[name]
    return module
class _LazyDescr(object):
    """Descriptor that resolves its value lazily on first attribute access,
    then caches it on the instance and removes itself from the class."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        # Cache on the instance so subsequent lookups bypass the descriptor.
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy placeholder for a module that was renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # A missing new name means the module kept its name on Python 3.
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy placeholder for an attribute that moved (module and/or name)
    between Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Default the new attribute name to the old one, then to *name*.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""


# Mapping of Python-2 locations to Python-3 locations for the six.moves
# pseudo-module.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]
# Register every moved item as a lazy attribute on the moves module class.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr

# Expose the pseudo-module as <package>.moves in sys.modules.
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
    """Add an item to six.moves."""
    # After this call the item is importable as six.moves.<move.name>.
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Fall back to the instance dict of the moves module (covers items
        # added directly to the instance rather than the class).
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Names of function/method internals and dict iteration methods differ
# between Python 2 and 3; pick the right spellings once.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
# Provide a version-neutral next(): use the builtin when it exists
# (Python 2.6+/3), otherwise call the iterator's .next() method.
try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator
if PY3:
    # Python 3 has no unbound methods; functions are returned as-is.
    def get_unbound_function(unbound):
        return unbound

    Iterator = object

    # callable() was removed in early Python 3 releases; emulate it.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    # Base class that maps Python 3's __next__ onto Python 2's next().
    class Iterator(object):

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Version-neutral accessors for method/function internals, built from the
# attribute names chosen above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
    """Return an iterator over the keys of a dictionary."""
    method = getattr(d, _iterkeys)
    return iter(method())
def itervalues(d):
    """Return an iterator over the values of a dictionary."""
    method = getattr(d, _itervalues)
    return iter(method())
def iteritems(d):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    method = getattr(d, _iteritems)
    return iter(method())
# Byte/text literal helpers and the StringIO/BytesIO aliases.
if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s

    def u(s):
        return unicode(s, "unicode_escape")
    int2byte = chr
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    # Python 3: exec and print are real builtins; reraise uses traceback
    # attachment syntax that does not parse on Python 2.
    import builtins
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    print_ = getattr(builtins, "print")
    del builtins

else:
    # Python 2: both exec and the three-argument raise are statements, so
    # they must be hidden inside exec'd strings to keep this file importable
    # on Python 3.
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")

    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")

    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        # Emit unicode output when any argument or separator is unicode.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)

_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Create a base class with a metaclass."""
    namespace = {}
    return meta("NewBase", (base,), namespace)
/Chiplotle3-0.4.3-py3-none-any.whl/chiplotle3/tools/hpgltools/relativize.py | from chiplotle3.geometry.core.coordinate import Coordinate
from chiplotle3.hpgl import commands as hpgl
import numpy
## TODO: this works, but is quite ugly. Refactor!
def relativize(data):
    '''Converts all absolute coordinate commands (PA, RA, EA, AA)
    into relative commands (PR, RR, ER, AR), so that everything
    is in relative coordinate values.

    Commands that are already relative (or not coordinate commands at
    all) are passed through unchanged; the current pen position is
    tracked so each absolute command can be expressed as an offset.
    '''
    last_position = None
    delta = None
    result = []
    for e in data:
        if isinstance(e, (hpgl.PA, hpgl.RA, hpgl.EA, hpgl.AA)):
            ## absolute: compute the offset from the last known pen position
            if isinstance(e, hpgl.PA):
                if last_position is not None:
                    delta = e.xy[0] - last_position
                last_position = e.xy[-1]
            else:
                if last_position is not None:
                    delta = e.xy - last_position
                last_position = e.xy
            result += _return_relative_from_absolute(e, delta)
        elif isinstance(e, (hpgl.PR, hpgl.RR, hpgl.ER, hpgl.AR)):
            ## already relative: just advance the tracked pen position
            if isinstance(e, hpgl.PR):
                coords = [list(c) for c in e.xy]
                offset = Coordinate(*numpy.sum(coords, axis=0))
                if last_position is not None:
                    last_position += offset
                else:
                    last_position = offset
            else:
                last_position = (last_position or 0) + e.xy
            ## bug fix: non-PR relative commands (RR/ER/AR) were previously
            ## dropped from the output (the earlier implementation kept in
            ## the comment block below appended every relative command).
            result.append(e)
        else:
            result.append(e)
    return result
def _return_relative_from_absolute(command, delta):
    """Translate one absolute HPGL command into its relative equivalent(s).

    PA becomes a PR for the offset *delta* plus a PR for the point-to-point
    differences; RA/EA/AA become RR/ER/AR carrying *delta*.  An empty list
    is returned when nothing can be emitted (e.g. *delta* is None).
    """
    result = []
    if isinstance(command, hpgl.PA):
        if delta is not None:
            result.append(hpgl.PR([delta]))
        coords = [list(c) for c in command.xy]
        diff = numpy.diff(coords, axis=0).tolist()
        if len(diff) > 0:
            result.append(hpgl.PR(diff))
    elif delta is not None:
        conversions = ((hpgl.RA, hpgl.RR), (hpgl.EA, hpgl.ER), (hpgl.AA, hpgl.AR))
        for absolute_cls, relative_cls in conversions:
            if isinstance(command, absolute_cls):
                result.append(relative_cls(delta))
                break
    return result
## Trash...
#def relativize(data):
# '''Converts all absolute coordinate commands (PA, RA, EA, AA)
# into relative commands (PR, RR, ER, AR), so that everything
# has in realtive coordinate values.'''
# def _return_relative_from_absolute(command, delta):
# result = [ ]
# if isinstance(command, hpgl.PA):
# if delta is not None:
# result.append(hpgl.PR(delta))
# diff = numpy.diff(command.xy, axis=0)
# if len(diff) > 0:
# result.append(hpgl.PR(diff))
# elif isinstance(command, hpgl.RA) and delta is not None:
# result.append(hpgl.RR(delta))
# elif isinstance(command, hpgl.EA) and delta is not None:
# result.append(hpgl.ER(delta))
# elif isinstance(command, hpgl.AA) and delta is not None:
# result.append(hpgl.AR(delta))
# return result
#
# ## main body...
# last_position = None
# delta = None
# result = [ ]
# for e in data:
# ## absolute...
# if isinstance(e, (hpgl.PA, hpgl.RA, hpgl.EA, hpgl.AA)):
# ## handle delta...
# if not last_position is None:
# delta = e.xy[0] - last_position
# last_position = e.xy[-1]
# result += _return_relative_from_absolute(e, delta)
# ## relative...
# elif isinstance(e, (hpgl.PR, hpgl.RR, hpgl.ER, hpgl.AR)):
# if not last_position is None:
# last_position += numpy.sum(e.xy, axis = 0)
# else:
# last_position = numpy.sum(e.xy, axis = 0)
# result.append(e)
# else:
# result.append(e)
# return result
# | PypiClean |
/Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/model_implementations/transformers/clip_encoder.py |
# DeepSpeed Team
import torch
from deepspeed.accelerator import get_accelerator
from ..features.cuda_graph import CUDAGraph
class DSClipEncoder(CUDAGraph, torch.nn.Module):
    """CUDA-graph wrapper for a CLIP text encoder.

    Keeps two independent graph/buffer slots and alternates between them on
    successive forward() calls (``self.iter`` toggles 0/1) -- presumably
    because the encoder is invoked twice per step with different inputs;
    TODO(review): confirm against callers.
    """

    def __init__(self, enc, enable_cuda_graph=False):
        super().__init__(enable_cuda_graph=enable_cuda_graph)
        # Replace the causal-mask builder so the mask is created on the
        # current accelerator device.
        enc.text_model._build_causal_attention_mask = self._build_causal_attention_mask
        self.enc = enc
        self.device = self.enc.device
        self.dtype = self.enc.dtype
        # Per-slot CUDA-graph state (two alternating slots).
        self.cuda_graph_created = [False, False]
        self.static_inputs = [None, None]
        self.static_kwargs = [None, None]
        self.static_output = [None, None]
        self._cuda_graphs = [None, None]
        self.iter = 0
        self.config = self.enc.config

    def _build_causal_attention_mask(self, bsz, seq_len, dtype):
        """Build a (bsz, 1, seq_len, seq_len) causal mask: min-dtype above
        the diagonal, zero elsewhere."""
        mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype, device=get_accelerator().current_device_name())
        mask.fill_(torch.tensor(torch.finfo(dtype).min))
        mask.triu_(1)
        mask = mask.unsqueeze(1)
        return mask

    def _graph_replay(self, *inputs, **kwargs):
        """Copy tensor arguments into the captured static buffers, replay the
        graph for the current slot, and return its static output."""
        for i in range(len(inputs)):
            if torch.is_tensor(inputs[i]):
                self.static_inputs[self.iter][i].copy_(inputs[i])
        for k in kwargs:
            if torch.is_tensor(kwargs[k]):
                self.static_kwargs[self.iter][k].copy_(kwargs[k])
        self._cuda_graphs[self.iter].replay()
        return self.static_output[self.iter]

    def forward(self, *inputs, **kwargs):
        """Run the encoder; with CUDA graphs enabled, capture on first use of
        the current slot and replay thereafter."""
        if self.enable_cuda_graph:
            if self.cuda_graph_created[self.iter]:
                outputs = self._graph_replay(*inputs, **kwargs)
            else:
                self._create_cuda_graph(*inputs, **kwargs)
                outputs = self._graph_replay(*inputs, **kwargs)
            # Alternate between the two graph slots.
            self.iter = (self.iter + 1) % 2
            return outputs
        else:
            return self.enc(*inputs, **kwargs)

    def _create_cuda_graph(self, *inputs, **kwargs):
        """Warm up on a side stream, then capture the encoder call into a
        CUDA graph for the current slot."""
        # warmup to create the workspace and cublas handle
        cuda_stream = torch.cuda.Stream()
        cuda_stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(cuda_stream):
            for i in range(3):
                ret = self._forward(*inputs, **kwargs)
        torch.cuda.current_stream().wait_stream(cuda_stream)
        # create cuda_graph and assign static_inputs and static_outputs
        self._cuda_graphs[self.iter] = torch.cuda.CUDAGraph()
        self.static_inputs[self.iter] = inputs
        self.static_kwargs[self.iter] = kwargs
        with torch.cuda.graph(self._cuda_graphs[self.iter]):
            self.static_output[self.iter] = self._forward(*self.static_inputs[self.iter],
                                                          **self.static_kwargs[self.iter])
        self.cuda_graph_created[self.iter] = True

    def _forward(self, *inputs, **kwargs):
        """Direct (non-graph) invocation of the wrapped encoder."""
        return self.enc(*inputs, **kwargs)
/NanoCap-1.0b12.tar.gz/NanoCap-1.0b12/nanocap/structures/structure.py | from nanocap.core.globals import *
from nanocap.core.util import *
import os,sys,math,copy,random,time,ctypes,shutil,datetime
import numpy
import yaml
import nanocap.core.points as points
from nanocap.core import triangulation,constructdual,calculateschlegel,ringcalculator,output,input
from nanocap.clib import clib_interface
from nanocap.core.structuredata import *
clib = clib_interface.clib
class Structure(object):
'''
Important methods:
get_structure_data - returns this structures data as a
dict using the universal structure
data class. This is then used by
structurelogs and the databases.
The parent class will define common
data but the child specific data needs
to be declared in each subclass
'''
def __init__(self,type):
    '''
    Base initialiser: sets up empty dual and carbon lattices, flags for
    derived data (triangles, bonds, rings, Schlegel projection), and
    bookkeeping (minimisers, users, database).

    must declare:
        dual_lattice
        carbon_lattice
    '''
    # NOTE(review): this first assignment is immediately overwritten by the
    # next line -- it appears redundant.
    self.type = StructureType(NULL,"NULL","NULL")
    self.type=type
    self.has_triangles = False
    self.has_carbon_bonds = False
    self.has_carbon_rings = False
    self.has_schlegel = False
    self.has_child_structures = False
    self.dual_lattice = points.Points(self.type.label+" Dual Lattice Points")
    self.dual_lattice.initArrays(0)
    self.carbon_lattice = points.Points(self.type.label+" Carbon Lattice Points")
    self.carbon_lattice.initArrays(0)
    self.carbon_lattice_minimiser = None
    self.dual_lattice_minimiser = None
    self.seed = -1
    self.parent_structure=None
    self.dual_lattice_user = CONFIG.opts['User']
    self.carbon_lattice_user = CONFIG.opts['User']
    self.database = None
def load_carbon_lattice_from_file(self, file, format):
    """Read point positions from *file* (in *format*) and install them as
    this structure's carbon lattice."""
    loaded = input.read_points(file, format)
    self.set_carbon_lattice(loaded.npoints, loaded.pos)
def load_dual_lattice_from_file(self, file, format):
    """Read point positions from *file* (in *format*) and install them as
    this structure's dual lattice."""
    loaded = input.read_points(file, format)
    self.set_dual_lattice(loaded.npoints, loaded.pos)
def get_structure_data(self):
    """Serialise this structure into the universal structure-data dict.

    Fills the point, ring, dual-lattice and carbon-lattice tables from the
    current state and returns the dict (also stored on ``self.data``).
    Ring and minimiser fields are filled best-effort: when rings have not
    been calculated or no minimiser is attached, the corresponding
    defaults are left untouched.
    """
    self.data = copy.deepcopy(DEFAULT_DATA)
    printl("initial structure data",self.data)
    table = 'dual_lattice_points'
    self.data[table]['x'] = self.dual_lattice.pos[0::3]
    self.data[table]['y'] = self.dual_lattice.pos[1::3]
    self.data[table]['z'] = self.dual_lattice.pos[2::3]
    table = 'carbon_lattice_points'
    self.data[table]['x'] = self.carbon_lattice.pos[0::3]
    self.data[table]['y'] = self.carbon_lattice.pos[1::3]
    self.data[table]['z'] = self.carbon_lattice.pos[2::3]
    table = 'rings'
    # Bare except replaced by the exceptions actually expected when
    # ring_info is missing or incomplete.
    try:
        for size in (3, 4, 5, 6, 7, 8):
            self.data[table]['rings_%d' % size] = int(self.ring_info['ringCount'][size])
    except (AttributeError, KeyError, IndexError, TypeError):
        pass
    table = 'dual_lattices'
    self.data[table]['date'] = datetime.datetime.now()
    self.data[table]['user_name'] = self.dual_lattice_user
    self.data[table]['type'] = self.type.label
    self.data[table]['npoints'] = self.dual_lattice.npoints
    self.data[table]['energy'] = self.get_dual_lattice_energy()
    try:
        self.data[table]['ff_id'] = self.dual_lattice_minimiser.FFID
        self.data[table]['optimiser'] = self.dual_lattice_minimiser.min_type
    except AttributeError:
        # No dual-lattice minimiser attached (it defaults to None).
        pass
    table = 'carbon_lattices'
    self.data[table]['date'] = datetime.datetime.now()
    self.data[table]['user_name'] = self.carbon_lattice_user
    self.data[table]['type'] = self.type.label
    self.data[table]['natoms'] = self.carbon_lattice.npoints
    self.data[table]['energy'] = self.get_carbon_lattice_energy()
    self.data[table]['energy_constrained'] = self.get_carbon_lattice_scaled_energy()
    self.data[table]['energy_per_atom'] = self.get_carbon_lattice_energy_per_atom()
    self.data[table]['scale'] = self.get_carbon_lattice_scale()
    try:
        self.data[table]['ff_id'] = self.carbon_lattice_minimiser.FFID
        self.data[table]['ff_options'] = self.carbon_lattice_minimiser.FF.options
        self.data[table]['optimiser'] = self.carbon_lattice_minimiser.min_type
        self.data[table]['energy_units'] = self.carbon_lattice_minimiser.FF.energy_units
    except AttributeError:
        # No carbon-lattice minimiser attached (it defaults to None).
        pass
    return self.data
def set_dual_lattice(self, npoints, pos):
    """Replace the dual lattice with a fresh Points set holding a copy of *pos*."""
    label = "{} Dual Lattice Points".format(self.type.label)
    self.dual_lattice = points.Points(label)
    self.dual_lattice.initArrays(npoints)
    self.dual_lattice.pos = numpy.copy(pos)
def set_carbon_lattice(self, npoints, pos):
    """Replace the carbon lattice with a fresh Points set holding a copy of
    *pos*, then refresh any child carbon lattices."""
    label = "{} Carbon Lattice Points".format(self.type.label)
    self.carbon_lattice = points.Points(label)
    self.carbon_lattice.initArrays(npoints)
    self.carbon_lattice.pos = numpy.copy(pos)
    self.calculate_child_carbon_lattices()
def calculate_child_carbon_lattices(self):
    """Hook for subclasses with child structures; the base class does nothing."""
    pass
def set_con_carbon_lattice(self,npoints,pos):
    """Store a copy of *pos* as the constrained carbon-lattice positions.

    Note: *npoints* is currently unused (see the commented-out rebuild below).
    """
    #self.carbon_lattice = points.Points("{} Carbon Lattice Points".format(self.type.label))
    #self.carbon_lattice.initArrays(npoints)
    self.carbon_lattice.constrained_pos = numpy.copy(pos)
def set_carbon_lattice_minimiser(self, minimiser):
    """Attach the minimiser used to optimise the carbon lattice."""
    self.carbon_lattice_minimiser = minimiser
def set_dual_lattice_minimiser(self, minimiser):
    """Attach the minimiser used to optimise the dual lattice."""
    self.dual_lattice_minimiser = minimiser
def get_child_structures(self):
    """Return this structure's child structures; the base class has none.

    Subclasses override this, e.g. a capped nanotube returns the nanotube
    and the cap.
    """
    return []
def update_child_structures(self):
    """Hook for subclasses to refresh their child structures; no-op here."""
    pass
def export(self,folder=".",
           save_info=True,
           save_image=False,
           save_video=False,
           save_carbon_lattice=True,
           save_con_carbon_lattice=True,
           info_file='structure_info.txt',
           save_dual_lattice=True,
           formats=['xyz',]):
    """Write this structure to a new subfolder of *folder*.

    Depending on the flags this saves rendered images (isometric +
    Schlegel), the dual/carbon/constrained lattices in each of *formats*,
    and a text + YAML info file.  *save_video* is currently unused.
    """
    path = os.path.abspath(os.path.join(folder,self.get_single_line_description()))
    # Best-effort directory creation; existing directories are fine.
    try:os.makedirs(path)
    except:pass
    if(save_image):
        from nanocap.rendering.defaults import SCHLEGEL_G,SCHLEGEL_R
        self.vtkframe.move_camera([0,0,-100],[0,0,0],[0,1,0])
        self.vtkframe.resetCamera()
        self.vtkframe.saveImage(os.path.join(path,"isometric.jpg"),overwrite=True,resolution=None)
        self.calculate_schlegel(SCHLEGEL_G,SCHLEGEL_R)
        self.schlegelframe.saveImage(os.path.join(path,"schlegel.jpg"),overwrite=True,resolution=None)
    if(save_dual_lattice):
        filename = os.path.join(path,"dual_lattice")
        self.export_dual_lattice(filename, formats)
    if(save_carbon_lattice):
        filename = os.path.join(path,"carbon_lattice")
        self.export_carbon_lattice(filename, formats)
    if(save_con_carbon_lattice):
        filename = os.path.join(path,"constrained_carbon_lattice")
        self.export_con_carbon_lattice(filename, formats)
    if(save_info):
        filename = os.path.join(path,info_file)
        f= open(filename,"w")
        f.write(self.__repr__())
        f.close()
        # Structured dump alongside the human-readable report.
        f= open(os.path.splitext(filename)[0]+".yaml","w")
        yaml.dump(self.get_structure_data(), f,default_flow_style=False)
        f.close()
def export_dual_lattice(self,filename,formats=None):
    """Write the dual lattice to *filename* in each of *formats* (default
    xyz), then recursively export each child structure's dual lattice with
    the child's label appended to the filename."""
    # None default avoids the shared-mutable-default-argument pitfall.
    if formats is None:
        formats = ['xyz']
    for format in formats:
        output.write_points(filename,self.dual_lattice,format,constrained=False)
    for child in self.get_child_structures():
        child.export_dual_lattice(filename+"_{}".format(child.type.label),formats=formats)
def export_carbon_lattice(self,filename,formats=None):
    """Write the carbon lattice to *filename* in each of *formats* (default
    xyz), then recursively export each child structure's carbon lattice
    with the child's label appended to the filename."""
    # None default avoids the shared-mutable-default-argument pitfall.
    if formats is None:
        formats = ['xyz']
    for format in formats:
        output.write_points(filename,self.carbon_lattice,format,constrained=False)
    for child in self.get_child_structures():
        child.export_carbon_lattice(filename+"_{}".format(child.type.label),formats=formats)
def export_con_carbon_lattice(self,filename,formats=None):
    """Write the constrained carbon lattice to *filename* in each of
    *formats* (default xyz)."""
    # None default avoids the shared-mutable-default-argument pitfall.
    if formats is None:
        formats = ['xyz']
    for format in formats:
        output.write_points(filename,self.carbon_lattice,format,constrained=True)
def render(self,render_window=None,dual_lattice=True,carbon_lattice=True,rings=True,triangles=True,
           options_holder=None,show=True,render_window_holder=None):
    '''Render the structure in a VTK/Qt window.

    When *render_window* is given it is reused; otherwise a new tabbed
    window (3D + Schlegel views) is created.  *options_holder* /
    *render_window_holder* embed the widgets into an existing layout;
    when both are None a standalone Qt application is started (and, with
    *show*, its event loop is entered).
    NOTE(review): the dual_lattice/carbon_lattice/rings/triangles flags
    are not referenced in this body -- presumably consumed elsewhere;
    confirm before relying on them.
    '''
    try:
        from nanocap.gui.settings import QtGui,QtCore
        from nanocap.gui.widgets import HolderWidget
        from nanocap.gui import structureoptionswindow
        from nanocap.gui.renderwindow import vtkqtframe
        from nanocap.rendering import structureactors
    except:
        # Headless installation: rendering is optional.
        printe("could not import GUI/render libraries, will not render")
        return
    if(options_holder==None and render_window_holder==None):
        app = QtGui.QApplication(sys.argv)
    if(render_window==None):
        # Build a fresh tabbed window holding the 3D and Schlegel views.
        self.render_window = QtGui.QTabWidget()
        self.vtkframe = vtkqtframe.VtkQtFrame(0)
        self.schlegelframe = vtkqtframe.VtkQtFrame(0)
        self.schlegelframe.move_camera(numpy.array([0,0,10]),numpy.array([0,0,0]),numpy.array([0,1,0]))
        self.render_window.addTab(self.vtkframe,"3D View")
        self.render_window.addTab(self.schlegelframe,"Schlegel View")
        self.render_window.vtkframe = self.vtkframe
        self.render_window.schlegelframe = self.schlegelframe
        # if(show):
        #     self.vtkframe.show()
        #     self.schlegelframe.show()
    else:self.render_window = render_window
    self.structure_actors = structureactors.StructureActors(self)
    self.options_window = structureoptionswindow.StructureOptionsWindow(self)
    #self.options_window = QtGui.QWidget()
    #self.options_window.hide()
    #if(render_window!=None):holder.addWidget(self.render_window)
    if(options_holder!=None):
        options_holder.addWidget(self.options_window)
    if(render_window_holder!=None):
        render_window_holder.addWidget(self.render_window)
    self.render_window.vtkframe.centerCameraOnPointSet(self.carbon_lattice)
    # Give every child structure its own actors/options and attach their
    # point tables to this structure's options window.
    for child_structure in self.get_child_structures():
        printl("setting child_structure actors",child_structure.type.label)
        child_structure.render_window = self.render_window
        child_structure.structure_actors = structureactors.StructureActors(child_structure)
        child_structure.options_window = structureoptionswindow.StructureOptionsWindow(child_structure)
        self.options_window.points_widgets_holder.addHeader(child_structure.type.label,
                                                            bold=True,frame=False)
        self.options_window.points_widgets_holder.addWidget(child_structure.options_window.render_points_table)
    if(options_holder==None and render_window_holder==None):
        # Standalone mode: wrap options + render window in one container.
        self.window = HolderWidget()
        self.window.containerLayout.setSpacing(0)
        if(options_holder==None):self.window.addWidget(self.options_window)
        if(render_window_holder==None):self.window.addWidget(self.render_window)
        #child_structure.render(options_holder=self.window,
        #                       render_window=self.render_window)
        if(show):
            #app = QtGui.QApplication(sys.argv)
            mw = QtGui.QMainWindow()
            mw.setCentralWidget(self.window)
            mw.show()
            self.window.show()
            app.exec_()
            #sys.exit(app.exec_())
    else:
        if(show):
            pass
            # self.options_window.show()
            # for child_structure in self.get_child_structures():
            #     child_structure.options_window.show()
            # self.render_window.show()
    printl("end render")
    #self.window.show()
def render_update(self):
    """Refresh actors and re-centre the views after the structure changed.

    All failures are swallowed: this is a best-effort GUI refresh that
    must also work when no GUI has been created.
    """
    printl("in render update",self.type.label)
    try:
        from nanocap.gui.settings import QtGui,QtCore
        self.options_window.emit(QtCore.SIGNAL("update_structure()"))
        self.structure_actors.update_actors()
        self.vtkframe.center_on_load()
        self.schlegelframe.center_on_load()
    except:
        pass
    try:
        # Propagate the refresh to any child structures.
        for child_structure in self.get_child_structures():
            child_structure.render_update()
    except:
        pass
    printl("emitted update_structure",self.type.label)
def show(self):
    """Show both the options window and the render window."""
    for window in (self.options_window, self.render_window):
        window.show()
def hide(self):
    """Hide both the options window and the render window."""
    for window in (self.options_window, self.render_window):
        window.hide()
def __repr__(self):
    """Render a plain-text, fixed-width report of the structure's tables."""
    # Format templates: twidth-wide rows; colN splits the row into N equal
    # columns, col1h/col1c are '-'/'.'-padded headers, sepformat is '='-padded.
    self.twidth = 80
    self.sepformat = "{0:=^"+str(self.twidth)+"} \n"
    self.col1 = "{0:<"+str(self.twidth)+"} \n"
    self.col1h = "{0:-^"+str(self.twidth)+"} \n"
    self.col1c = "{0:.^"+str(self.twidth)+"} \n"
    self.col2 = "{0:<"+str(int(self.twidth/2))+"} {1:<"+str(int(self.twidth/2))+"}\n"
    self.col3 = "{0:<"+str(int(self.twidth/3))+"} {1:<"+str(int(self.twidth/3))+"} {2:<"+str(int(self.twidth/3))+"}\n"
    self.col4 = "{0:<"+str(int(self.twidth/4))+"} {1:<"+str(int(self.twidth/4))+"} {2:<"+str(int(self.twidth/4))+"} {3:<"+str(int(self.twidth/4))+"}\n"
    out = ""
    # Title line, e.g. "C60 Fullerene" centred between '=' padding.
    out += self.sepformat.format("C"+str(self.carbon_lattice.npoints)+" "+self.type.label)
    tables = ['dual_lattices',
              'carbon_lattices',
              'rings','users']
    fdata = self.format_data()
    for table in tables:
        out += self.col1c.format(table)
        for key,d in fdata[table].items():
            out += self.col2.format(key,d)
    return out
def format_data(self):
    """Return {table: {field: value}} suitable for display.

    Skips None values and excluded fields; datetimes are rendered as
    "YYYY-MM-DD HH:MM:SS" strings.
    """
    data = self.get_structure_data()
    printl(data.keys())
    tables = ['dual_lattices',
              'carbon_lattices',
              'rings','users']
    exclude = ['type',]
    #for table in data.keys():
    out = {}
    for table in tables:
        #out += self.col1c.format(table)
        header = table
        out[header] = {}
        cols = []
        for field in data[table].keys():
            if(field in exclude):continue
            d = data[table][field]
            if(d!=None):
                #out += self.col2.format(field,d)
                if(isinstance(d, datetime.datetime)):
                    d = d.strftime("%Y-%m-%d %H:%M:%S")
                cols.append((field,d))
        if(len(cols)>0):
            #out += self.col1c.format(table)
            for c in cols:
                #out += self.col2.format(*c)
                out[header][c[0]] = c[1]
    return out
def calculate_structural_info(self):
    """Hook for subclasses to compute extra structural info; no-op here."""
    pass
def get_single_line_description(self,carbon_lattice=True,dual_lattice=True,carbonEnergy=True):
    """Build a one-line, filesystem-friendly label: type, point counts,
    energy and (when rings are known) the isolated-pentagon percentage."""
    des=self.type.text
    if(carbon_lattice):des+="_Nc_"+str(self.carbon_lattice.npoints)
    if(dual_lattice):des+="_Nt_"+str(self.dual_lattice.npoints)
    if(carbonEnergy):
        # Prefer the carbon-lattice energy; fall back to the dual-lattice
        # energy if it cannot be computed (bare except kept as best-effort).
        try:des+="_Energy_"+str(self.get_carbon_lattice_energy())
        except:
            if(self.get_dual_lattice_energy()>0):
                des+="_Energy_"+str(self.get_dual_lattice_energy())
    else:
        if(self.get_dual_lattice_energy()>0):
            des+="_Energy_"+str(self.get_dual_lattice_energy())
    if(self.has_carbon_rings):
        if(self.ring_info['ringCount'][5]>0):
            # Percentage of pentagons that are isolated (no shared edges).
            IPperc = float(self.ring_info['isolatedPentagons'])/float(self.ring_info['ringCount'][5])*100.0
            des+="_IP%_"+str(IPperc)
    return des
def get_GUI_description(self,carbon_lattice=True,dual_lattice=True,carbonEnergy=True):
if(self.get_dual_lattice_energy()==0):
des = "C{} {}".format(self.carbon_lattice.npoints,self.type.label)
else:
des = "C{} {}: Dual Lattice Energy {} ".format(self.carbon_lattice.npoints,self.type.label,
self.get_dual_lattice_energy())
return des
def get_points(self,key):
if(key=="DualLattice"):return self.dual_lattice
if(key=="CarbonAtoms"):return self.carbon_lattice
if(key=="DualLattice_S"):
if(self.has_schlegel):return self.schlegel_dual_lattice
else:
printl("Schlegel not calculated")
return None
if(key=="CarbonAtoms_S"):
if(self.has_schlegel):return self.schlegel_carbon_lattice
else:
printl("Schlegel not calculated")
return None
    def triangulate_dual_lattice(self):
        """Delaunay-triangulate the dual lattice points.

        Stores the result in ``vertlist``/``ntriangles`` and sets
        ``has_triangles``.
        """
        self.vertlist, self.ntriangles = triangulation.delaunyTriangulation(self.dual_lattice)
        self.has_triangles = True
    def construct_carbon_lattice(self):
        """Build the carbon atom lattice as the dual of the triangulated
        dual lattice (triangulates first)."""
        printd("constructing carbon atoms from dual lattice")
        self.triangulate_dual_lattice()
        self.carbon_lattice = constructdual.constructDual(self.dual_lattice, self.ntriangles,
                                                          self.vertlist,
                                                          outlabel=self.type.label+" Carbon Atoms")
    def calculate_rings(self):
        """Detect carbon rings via ``ringcalculator``.

        Sets ``ring_info`` and ``has_carbon_rings`` (False when no rings
        were found).
        """
        printl("calculate_rings")
        self.ring_info = ringcalculator.calculate_rings(self.carbon_lattice, MaxNebs=3, MaxVerts=9)
        self.has_carbon_rings = True
        if(self.ring_info['nrings'] == 0): self.has_carbon_rings = False
    def calculate_schlegel(self, gamma, cutoff):
        """Compute Schlegel projections of the dual and carbon lattices.

        :param gamma: projection parameter forwarded to
            ``calculateschlegel.calculate_schlegel_projection``
        :param cutoff: z value above which projected points are removed
            (``pos`` appears to be a flat [x0, y0, z0, x1, ...] array — see
            the ``pos[2::3]`` stride and ``pos[index*3+2]`` lookups)

        When ring info exists, also builds ``schlegel_ring_info`` with any
        ring touching a removed vertex zeroed out. Sets ``has_schlegel``.
        """
        self.schlegel_dual_lattice = calculateschlegel.calculate_schlegel_projection(self.dual_lattice, gamma)
        self.schlegel_carbon_lattice = calculateschlegel.calculate_schlegel_projection(self.carbon_lattice, gamma)
        self.schlegel_carbon_lattice_full = copy.deepcopy(self.schlegel_carbon_lattice)
        # remove carbon points beyond the z cutoff from the projection
        z = self.carbon_lattice.pos[2::3]
        todelete = numpy.where(z > cutoff)[0]
        self.schlegel_carbon_lattice.removeIndexes(todelete)
        # same for the dual lattice points
        z = self.dual_lattice.pos[2::3]
        todelete = numpy.where(z > cutoff)[0]
        self.schlegel_dual_lattice.removeIndexes(todelete)
        if(self.has_carbon_rings):
            self.schlegel_ring_info = copy.deepcopy(self.ring_info)
            for i in range(0, self.ring_info['nrings']):
                for j in range(0, self.ring_info['VertsPerRingCount'][i]):
                    index = self.ring_info['Rings'][i*self.ring_info['MaxVerts'] + j]
                    if(self.carbon_lattice.pos[index*3+2] > cutoff):
                        # ring touches a removed vertex: hide it in the projection
                        self.schlegel_ring_info['VertsPerRingCount'][i] = 0
                        break
            for i in range(0, self.ring_info['nrings']):
                printl("Schlegel ring {} verts {} ".format(i, self.schlegel_ring_info['VertsPerRingCount'][i]))
        self.has_schlegel = True
    def calculate_carbon_bonds(self):
        """Compute carbon-carbon bonds via the C library ``clib``.

        Uses 1.2x the average bond length as the neighbour cutoff and
        allows up to ``maxbonds`` bonds per atom. Stores the results in
        ``nbonds``/``bonds`` and sets ``has_carbon_bonds``.
        NOTE(review): NPF/NPI are assumed to be numpy dtypes matching
        c_double/c_int — confirm against the module-level definitions.
        """
        maxbonds = 10
        AvBondLength = numpy.zeros(self.carbon_lattice.npoints, NPF)
        clib.get_average_bond_length(ctypes.c_int(self.carbon_lattice.npoints),
                                     self.carbon_lattice.pos.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                     AvBondLength.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
        cutoff = numpy.average(AvBondLength)*1.2
        mybonds = numpy.zeros(self.carbon_lattice.npoints*2*maxbonds, NPI)
        clib.calc_carbon_bonds.restype = ctypes.c_int
        nbonds = clib.calc_carbon_bonds(ctypes.c_int(self.carbon_lattice.npoints),
                                        self.carbon_lattice.pos.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                        mybonds.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
                                        ctypes.c_double(cutoff))
        self.nbonds = nbonds
        self.bonds = mybonds
        self.has_carbon_bonds = True
def calculate_surface_area_volume(self):
try:
self.ring_info['nrings']
except:
printl("no rings detects cannot calc surface area and vol")
self.volume=0
self.surface_area=0
return
if(self.ring_info['nrings']==0):
printl("no rings detects cannot calc surface area and vol")
self.volume=0
self.surface_area=0
return
printl("using nrings",self.ring_info['nrings'],"to determine area and volume")
'''
was going to retriangulate the carbon atoms, but there is no need since the
rings have been calculated. Use these along with the normals to determine the
volume using Gauss' thereom.
'''
stime = time.time()
self.surface_area,self.volume = ringcalculator.calculate_volume_from_rings(self.carbon_lattice,
self.ring_info['nrings'],
self.ring_info['MaxVerts'],
self.ring_info['Rings'],
self.ring_info['VertsPerRingCount'])
printl("surface_area",self.surface_area,"volume", self.volume)
printl("C time for vol calc",time.time()-stime)
    def reset(self, seed=None):
        """Rebuild the dual lattice with the same number of points.

        :param seed: optional seed forwarded to ``construct_dual_lattice``.
        """
        self.construct_dual_lattice(N_dual=self.dual_lattice.npoints, seed=seed)
    def get_dual_lattice_energy(self):
        """Return the dual lattice's ``final_energy`` as a float."""
        return float(self.dual_lattice.final_energy)
def get_carbon_lattice_energy(self):
try:
if(abs(self.carbon_lattice.final_energy)<1e-5):return float(self.dual_lattice.final_energy)
else:return float(self.carbon_lattice.final_energy)
except: 0
def get_carbon_lattice_energy_per_atom(self):
try:return float(self.carbon_lattice.final_energy)/self.carbon_lattice.npoints
except: return 0
def get_carbon_lattice_scale(self):
try:return float(self.carbon_lattice.final_scale)
except: return 0
def get_carbon_lattice_scaled_energy(self):
try:return float(self.carbon_lattice.final_scaled_energy)
except: return 0 | PypiClean |
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases() # noqa
from builtins import object
from past.builtins import basestring
from opentracing import (
InvalidCarrierException,
SpanContextCorruptedException,
)
from .constants import (
BAGGAGE_HEADER_PREFIX,
DEBUG_ID_HEADER_KEY,
TRACE_ID_HEADER,
)
from .span_context import SpanContext
from .constants import SAMPLED_FLAG, DEBUG_FLAG
import six
import urllib.parse
import json
import struct
class Codec(object):
    """Abstract base for codecs that move span contexts in and out of
    transport-specific carriers."""

    def inject(self, span_context, carrier):
        """Serialize *span_context* into *carrier*. Subclasses must override."""
        raise NotImplementedError()

    def extract(self, carrier):
        """Reconstruct a span context from *carrier*. Subclasses must override."""
        raise NotImplementedError()
class TextCodec(Codec):
    """Codec for text carriers (e.g. HTTP headers) stored as dicts."""

    def __init__(self,
                 url_encoding=False,
                 trace_id_header=TRACE_ID_HEADER,
                 baggage_header_prefix=BAGGAGE_HEADER_PREFIX,
                 debug_id_header=DEBUG_ID_HEADER_KEY):
        """
        :param url_encoding: if True, URL-encode/decode header values
        :param trace_id_header: header carrying the serialized span context
        :param baggage_header_prefix: prefix of headers carrying baggage items
        :param debug_id_header: header carrying a debug correlation id
        """
        self.url_encoding = url_encoding
        # header names are matched case-insensitively with '-' separators
        self.trace_id_header = trace_id_header.lower().replace('_', '-')
        self.baggage_prefix = baggage_header_prefix.lower().replace('_', '-')
        self.debug_id_header = debug_id_header.lower().replace('_', '-')
        self.prefix_length = len(baggage_header_prefix)

    def inject(self, span_context, carrier):
        """Write *span_context* (trace ids and baggage) into the dict
        *carrier*.

        :raises InvalidCarrierException: if *carrier* is not a dict
        """
        if not isinstance(carrier, dict):
            raise InvalidCarrierException('carrier not a collection')
        carrier[self.trace_id_header] = span_context_to_string(
            trace_id=span_context.trace_id, span_id=span_context.span_id,
            parent_id=span_context.parent_id, flags=span_context.flags)
        baggage = span_context.baggage
        if baggage:
            for key, value in six.iteritems(baggage):
                encoded_key = key
                if self.url_encoding:
                    # quote() needs bytes; encode text explicitly. The old
                    # `bytes(value)` raised TypeError for str on Python 3.
                    if isinstance(value, bytes):
                        encoded_value = urllib.parse.quote(value)
                    else:
                        encoded_value = urllib.parse.quote(
                            six.text_type(value).encode('utf-8'))
                    if six.PY2 and isinstance(key, six.text_type):
                        encoded_key = key.encode('utf-8')
                else:
                    encoded_value = value
                header_key = '%s%s' % (self.baggage_prefix, encoded_key)
                carrier[header_key] = encoded_value

    def extract(self, carrier):
        """Read a span context back out of the dict-like *carrier*.

        :return: a SpanContext, a debug-only SpanContext when only a debug
            id header is present, or None when no trace context is found
        :raises InvalidCarrierException: if *carrier* has no ``items``
        :raises SpanContextCorruptedException: if baggage is present
            without a trace context
        """
        if not hasattr(carrier, 'items'):
            raise InvalidCarrierException('carrier not a collection')
        trace_id, span_id, parent_id, flags = None, None, None, None
        baggage = None
        debug_id = None
        for key, value in six.iteritems(carrier):
            uc_key = key.lower()
            if uc_key == self.trace_id_header:
                if self.url_encoding:
                    value = urllib.parse.unquote(value)
                trace_id, span_id, parent_id, flags = \
                    span_context_from_string(value)
            elif uc_key.startswith(self.baggage_prefix):
                if self.url_encoding:
                    value = urllib.parse.unquote(value)
                attr_key = key[self.prefix_length:]
                if baggage is None:
                    baggage = {attr_key.lower(): value}
                else:
                    baggage[attr_key.lower()] = value
            elif uc_key == self.debug_id_header:
                if self.url_encoding:
                    value = urllib.parse.unquote(value)
                debug_id = value
        if not trace_id and baggage:
            raise SpanContextCorruptedException('baggage without trace ctx')
        if not trace_id:
            if debug_id is not None:
                return SpanContext.with_debug_id(debug_id=debug_id)
            return None
        return SpanContext(trace_id=trace_id, span_id=span_id,
                           parent_id=parent_id, flags=flags,
                           baggage=baggage)
class BinaryCodec(Codec):
    """Codec for binary (bytearray) carriers.

    Wire format: a 4-byte native unsigned length followed by a UTF-8 JSON
    object holding the baggage plus the serialized span context.
    """

    def __init__(self, trace_id_header=TRACE_ID_HEADER):
        # header names are matched case-insensitively with '-' separators
        self.trace_id_header = trace_id_header.lower().replace('_', '-')

    def inject(self, span_context, carrier):
        """Append *span_context* to the bytearray *carrier*.

        :raises InvalidCarrierException: if *carrier* is not a bytearray
        """
        if not isinstance(carrier, bytearray):
            raise InvalidCarrierException('carrier not a bytearray')
        baggage = span_context.baggage.copy()
        baggage[self.trace_id_header] = span_context_to_string(
            trace_id=span_context.trace_id, span_id=span_context.span_id,
            parent_id=span_context.parent_id, flags=span_context.flags)
        # json.dumps returns str; encode explicitly — bytearray(str)
        # raises TypeError on Python 3 without an encoding.
        bin_baggage = bytearray(json.dumps(baggage).encode('utf-8'))
        carrier.extend(bytearray(struct.pack("I", len(bin_baggage))))
        carrier.extend(bin_baggage)

    def extract(self, carrier):
        """Decode a span context from the bytearray *carrier*.

        :raises InvalidCarrierException: if *carrier* is not a bytearray
        :raises SpanContextCorruptedException: if no baggage was decoded
        """
        if not isinstance(carrier, bytearray):
            raise InvalidCarrierException('carrier not a bytearray')
        ctx_len = struct.unpack('I', carrier[0:4])[0]
        # Decode before json.loads: str(bytearray) on Python 3 yields the
        # repr ("bytearray(b'...')"), not the payload.
        carrier = json.loads(bytes(carrier[4:4 + ctx_len]).decode('utf-8'))
        trace_id, span_id, parent_id, flags = None, None, None, None
        baggage = None
        for key, value in six.iteritems(carrier):
            uc_key = key.lower()
            if uc_key == self.trace_id_header:
                trace_id, span_id, parent_id, flags = \
                    span_context_from_string(value)
            else:
                if baggage is None:
                    baggage = {uc_key: value}
                else:
                    baggage[uc_key] = value
        if baggage is None or not isinstance(baggage, dict):
            raise SpanContextCorruptedException()
        return SpanContext(trace_id=trace_id, span_id=span_id,
                           parent_id=parent_id, flags=flags,
                           baggage=baggage)
def span_context_to_string(trace_id, span_id, parent_id, flags):
    """
    Serialize span ID to a string
    {trace_id}:{span_id}:{parent_id}:{flags}

    Numbers are encoded as variable-length lower-case hex strings.
    If parent_id is None, it is written as 0.

    :param trace_id: trace id (int, or hex/decimal string)
    :param span_id: span id (int, or hex/decimal string)
    :param parent_id: parent span id, or None/0 for a root span
    :param flags: trace flags bitmask
    """
    parent_id = parent_id or 0

    def convert_id_to_int(_id):
        # Strings with hex letters are parsed base 16, otherwise base 10
        # (see is_hex_str).
        if isinstance(_id, str):
            return int(_id, 16) if is_hex_str(_id) else int(_id)
        # Coerce int-like values (e.g. numpy integers). The old
        # `assert type(_id)==int or type(_id)==long` raised NameError on
        # Python 3 (`long` no longer exists) for any non-int input.
        return int(_id)

    trace_id = convert_id_to_int(trace_id)
    span_id = convert_id_to_int(span_id)
    parent_id = convert_id_to_int(parent_id)
    flags = convert_id_to_int(flags)
    # render each field as lower-case hex
    return '{:x}:{:x}:{:x}:{:x}'.format(trace_id, span_id, parent_id, flags)
def span_context_from_string(value):
    """
    Decode span ID from a string into a TraceContext.

    :param value: formatted {trace_id}:{span_id}:{parent_id}:{flags},
        or a single-element list containing that string
    :return: (trace_id, span_id, parent_id, flags) tuple; parent_id is
        None when the serialized parent is 0 (root span)
    :raises SpanContextCorruptedException: if the value is malformed
        (note: it does NOT return None for malformed input)
    """
    if type(value) is list and len(value) > 0:
        # sometimes headers are presented as arrays of values
        if len(value) > 1:
            raise SpanContextCorruptedException(
                'trace context must be a string or array of 1: "%s"' % value)
        value = value[0]
    if not isinstance(value, basestring):
        raise SpanContextCorruptedException(
            'trace context not a string "%s"' % value)
    parts = value.split(':')
    if len(parts) != 4:
        raise SpanContextCorruptedException(
            'malformed trace context "%s"' % value)
    try:
        # all four fields are variable-length lower-case hex
        trace_id = int(parts[0], 16)
        span_id = int(parts[1], 16)
        parent_id = int(parts[2], 16)
        flags = int(parts[3], 16)
        if trace_id < 1 or span_id < 1 or parent_id < 0 or flags < 0:
            raise SpanContextCorruptedException(
                'malformed trace context "%s"' % value)
        # a serialized parent of 0 means "no parent"
        if parent_id == 0:
            parent_id = None
        return trace_id, span_id, parent_id, flags
    except ValueError as e:
        raise SpanContextCorruptedException(
            'malformed trace context "%s": %s' % (value, e))
def header_to_hex(header):
    """Parse a header value as a hexadecimal integer.

    :raises SpanContextCorruptedException: when *header* is not a string
        or is not valid hex
    """
    err = 'malformed trace context "%s", expected hex string' % header
    if not isinstance(header, (str, six.text_type)):
        raise SpanContextCorruptedException(err)
    try:
        return int(header, 16)
    except ValueError:
        raise SpanContextCorruptedException(err)
def is_hex_str(txt):
    """Return True if *txt* contains a hex letter (a-f), False if it is
    made of decimal digits only (including the empty string).

    :raises RuntimeError: on any character outside [0-9a-fA-F]
    Note: the scan stops at the FIRST a-f character, so characters after
    it are not validated.
    """
    hex_digits = '0123456789abcdef'
    hex_letters = hex_digits[10:]
    for ch in txt.lower():
        if ch not in hex_digits:
            raise RuntimeError("error hex alphabet")
        if ch in hex_letters:
            return True
    return False
from logging import getLogger
from pkg_resources import iter_entry_points
logger = getLogger(__name__)
def get_model_plugins(registry):
    """Instantiate every registered ``anyblok.model.plugin`` entry point.

    :param registry: registry instance passed to each plugin constructor
    :return: list of plugin instances
    """
    plugins = []
    for entry_point in iter_entry_points("anyblok.model.plugin"):
        # lazy %-style args: the message is only formatted when the
        # record is actually emitted (the old code formatted eagerly)
        logger.info("AnyBlok Load model plugin: %r", entry_point)
        plugins.append(entry_point.load()(registry))
    return plugins
class ModelPluginBase:
    """Base class for AnyBlok model plugins.

    Plugins are discovered through the ``anyblok.model.plugin`` entry point
    (see :func:`get_model_plugins`) and receive the registry at
    construction. The commented-out methods below document the optional
    hooks a plugin may implement; they are kept as reference signatures.
    """

    def __init__(self, registry):
        # keep a handle on the AnyBlok registry for use by the hooks
        self.registry = registry

    # def initialisation_tranformation_properties(self, properties,
    #                                             transformation_properties):
    #     """ Initialise the transform properties
    #     :param properties: the properties declared in the model
    #     :param new_type_properties: param to add in a new base if need
    #     """

    # def declare_field(self, name, field, namespace, properties,
    #                   transformation_properties):
    #     """Declare a field in the model
    #     :param name: field name
    #     :param field: field instance
    #     :param namespace: the namespace of the model
    #     :param properties: the properties of the model
    #     :param transformation_properties: the transformation properties
    #     """

    # def transform_base_attribute(self, attr, method, namespace, base,
    #                              transformation_properties,
    #                              new_type_properties):
    #     """ transform the attribute for the final Model
    #     :param attr: attribute name
    #     :param method: method pointer of the attribute
    #     :param namespace: the namespace of the model
    #     :param base: One of the base of the model
    #     :param transformation_properties: the properties of the model
    #     :param new_type_properties: param to add in a new base if need
    #     """

    # def transform_base(self, namespace, base,
    #                    transformation_properties,
    #                    new_type_properties):
    #     """ transform the base for the final Model
    #     :param namespace: the namespace of the model
    #     :param base: One of the base of the model
    #     :param transformation_properties: the properties of the model
    #     :param new_type_properties: param to add in a new base if need
    #     """

    # def insert_in_bases(self, new_base, namespace, properties,
    #                     transformation_properties):
    #     """Insert in a base the overload
    #     :param new_base: the base to be put on front of all bases
    #     :param namespace: the namespace of the model
    #     :param properties: the properties declared in the model
    #     :param transformation_properties: the properties of the model
    #     """

    # def after_model_construction(self, base, namespace,
    #                              transformation_properties):
    #     """Do some action with the constructed Model
    #     :param base: the Model class
    #     :param namespace: the namespace of the model
    #     :param transformation_properties: the properties of the model
    #     """
from functools import wraps
from flask_jwt_extended import JWTManager, verify_jwt_in_request, get_current_user, create_access_token, \
create_refresh_token
from .exception import AuthFailed, InvalidTokenException, ExpiredTokenException, NotFound
# Shared JWTManager instance; the loader callbacks below are registered on it.
jwt = JWTManager()
def admin_required(fn):
    """Decorator: verify the request's JWT and require a super admin."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        verify_jwt_in_request()
        if get_current_user().is_super:
            return fn(*args, **kwargs)
        raise AuthFailed(msg='只有超级管理员可操作')
    return wrapper
def group_required(fn):
    """Decorator: verify the JWT and require group-based permission.

    Super admins pass unconditionally; other users must be active, belong
    to a group, and the group must be allowed for the current endpoint.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        verify_jwt_in_request()
        user = get_current_user()
        # the user must be in the active state
        if not user.is_active:
            raise AuthFailed(msg='您目前处于未激活状态,请联系超级管理员')
        if not user.is_super:
            group_id = user.group_id
            if group_id is None:
                raise AuthFailed(msg='您还不属于任何权限组,请联系超级管理员获得权限')
            from .core import is_user_allowed
            if not is_user_allowed(group_id):
                raise AuthFailed(msg='权限不够,请联系超级管理员获得权限')
        return fn(*args, **kwargs)
    return wrapper
def login_required(fn):
    """Decorator: only require a valid JWT on the request."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        verify_jwt_in_request()
        return fn(*args, **kwargs)
    return wrapper
@jwt.user_loader_callback_loader
def user_loader_callback(identity):
    """Load the user object for a verified JWT identity (user id)."""
    from .core import find_user
    # If a token has been granted, the user should exist; guard anyway and
    # raise NotFound for stale identities.
    user = find_user(id=identity)
    if user is None:
        raise NotFound(msg='用户不存在')
    return user
@jwt.expired_token_loader
def expired_loader_callback():
    """Return the API error used when the presented token has expired."""
    return ExpiredTokenException()
@jwt.invalid_token_loader
def invalid_loader_callback(e):
    """Return the API error used for a malformed/invalid token.

    :param e: error description supplied by flask-jwt-extended (unused)
    """
    return InvalidTokenException()
@jwt.unauthorized_loader
def unauthorized_loader_callback(e):
    """Return the API error used when no token is present on the request.

    :param e: error description supplied by flask-jwt-extended (unused)
    """
    return AuthFailed(msg='认证失败,请检查请求头或者重新登陆')
def get_tokens(user):
    """Return an (access_token, refresh_token) pair for *user*."""
    return (create_access_token(identity=user.id),
            create_refresh_token(identity=user.id))
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.patheffects as path_effects
import scipy.constants as ct
import aptools.data_analysis.beam_diagnostics as bd
from aptools.data_handling.reading import read_beam
from aptools.plotting.plot_types import scatter_histogram
from aptools.plotting.rc_params import rc_params
from aptools.plotting.utils import (
add_projection, create_vertical_colorbars, add_text)
from aptools.helper_functions import (
get_only_statistically_relevant_slices, weighted_std)
def phase_space_overview_from_file(
        code_name, file_path, rasterized_scatter=None, show=True, **kwargs):
    """Read a beam file in the *code_name* format and plot its phase-space
    overview (see phase_space_overview)."""
    beam_data = read_beam(code_name, file_path, **kwargs)
    phase_space_overview(
        *beam_data, rasterized_scatter=rasterized_scatter, show=show)
def phase_space_overview(x, y, z, px, py, pz, q, rasterized_scatter=None,
                         show=True):
    """Plot a 3-panel phase-space overview (x-px, y-py, z-pz) annotated
    with emittances, Twiss parameters, sizes and energy spreads.

    Positions are in meters, momenta in m_e*c, q is the macroparticle
    charge used as statistical weight.
    """
    # beam parameters for the text annotations
    em_x = bd.normalized_transverse_rms_emittance(x, px, w=q) * 1e6
    em_y = bd.normalized_transverse_rms_emittance(y, py, w=q) * 1e6
    a_x, b_x, g_x = bd.twiss_parameters(x, px, pz, w=q)
    a_y, b_y, g_y = bd.twiss_parameters(y, py, pz, w=q)
    s_x = bd.rms_size(x, w=q)
    s_y = bd.rms_size(y, w=q)
    em_l = bd.longitudinal_rms_emittance(z, px, py, pz, w=q) * 1e6
    dz = z - np.average(z, weights=q)
    s_z = bd.rms_length(z, w=q)
    # NOTE(review): first argument looks like it should be `px`, not `pz`
    # (cf. the call in slice_analysis) — confirm.
    s_g = bd.relative_rms_energy_spread(pz, py, pz, w=q)
    s_g_sl, w_sl, sl_ed, s_g_sl_av = bd.relative_rms_slice_energy_spread(
        z, px, py, pz, w=q, n_slices=10)
    c_prof, _ = bd.current_profile(z, q, n_slices=50)
    c_peak = max(abs(c_prof))/1e3  # kA
    # s_g_sl_c = s_g_sl[int(len(s_g_sl)/2)]
    # make plot
    plt.figure(figsize=(8, 3))
    text_labels = []
    with plt.rc_context(rc_params):
        # x - px
        ax_1 = plt.subplot(131)
        scatter_histogram(x*1e6, px, rasterized=rasterized_scatter)
        plt.xlabel("x [$\\mu m$]")
        plt.ylabel("$p_x \\ \\mathrm{[m_e c]}$")
        text_labels += [
            plt.text(0.1, 0.9, '$\\epsilon_{n,x} = $'
                     + '{}'.format(np.around(em_x, 3))
                     + '$\\ \\mathrm{\\mu m}$',
                     transform=ax_1.transAxes, fontsize=8),
            plt.text(0.1, 0.8,
                     '$\\beta_{x} = $' + '{}'.format(np.around(b_x, 3))
                     + 'm', transform=ax_1.transAxes, fontsize=8),
            plt.text(0.1, 0.7,
                     '$\\alpha_{x} = $' + '{}'.format(np.around(a_x, 3)),
                     transform=ax_1.transAxes, fontsize=8),
            plt.text(0.1, 0.6, '$\\sigma_{x} = $'
                     + '{}'.format(np.around(s_x*1e6, 3))
                     + '$\\ \\mathrm{\\mu m}$', transform=ax_1.transAxes,
                     fontsize=8),
        ]
        # y - py
        ax_2 = plt.subplot(132)
        scatter_histogram(y * 1e6, py, rasterized=rasterized_scatter)
        plt.xlabel("y [$\\mu m$]")
        plt.ylabel("$p_y \\ \\mathrm{[m_e c]}$")
        text_labels += [
            plt.text(0.1, 0.9, '$\\epsilon_{n,y} = $'
                     + '{}'.format(np.around(em_y, 3))
                     + '$\\ \\mathrm{\\mu m}$',
                     transform=ax_2.transAxes, fontsize=8),
            plt.text(0.1, 0.8,
                     '$\\beta_{y} = $' + '{}'.format(np.around(b_y, 3))
                     + 'm', transform=ax_2.transAxes, fontsize=8),
            plt.text(0.1, 0.7,
                     '$\\alpha_{y} = $' + '{}'.format(np.around(a_y, 3)),
                     transform=ax_2.transAxes, fontsize=8),
            plt.text(0.1, 0.6, '$\\sigma_{y} = $'
                     + '{}'.format(np.around(s_y*1e6, 3))
                     + '$\\ \\mathrm{\\mu m}$', transform=ax_2.transAxes,
                     fontsize=8)
        ]
        # z - pz
        ax_3 = plt.subplot(133)
        scatter_histogram(dz / ct.c * 1e15, pz, rasterized=rasterized_scatter)
        plt.xlabel("$\\Delta z$ [fs]")
        plt.ylabel("$p_z \\ \\mathrm{[m_e c]}$")
        text_labels += [
            plt.text(0.1, 0.9, '$\\epsilon_{L} = $'
                     + '{}'.format(np.around(em_l, 3))
                     + '$\\ \\mathrm{\\mu m}$', transform=ax_3.transAxes,
                     fontsize=8),
            plt.text(0.1, 0.8, '$\\sigma_\\gamma/\\gamma=$'
                     + '{}'.format(np.around(s_g*1e2, 3)) + '$\\%$',
                     transform=ax_3.transAxes, fontsize=8),
            plt.text(0.1, 0.7, '$\\sigma^s_\\gamma/\\gamma=$'
                     + '{}'.format(np.around(s_g_sl_av*1e2, 3)) + '$\\%$',
                     transform=ax_3.transAxes, fontsize=8),
            plt.text(0.1, 0.6, '$\\sigma_z=$'
                     + '{}'.format(np.around(s_z/ct.c*1e15, 3)) + ' fs',
                     transform=ax_3.transAxes, fontsize=8),
            plt.text(0.1, 0.5, '$I_{peak}=$'
                     + '{}'.format(np.around(c_peak, 2)) + ' kA',
                     transform=ax_3.transAxes, fontsize=8)
        ]
        # white outline keeps labels readable on top of the scatter
        for label in text_labels:
            label.set_path_effects(
                [path_effects.Stroke(linewidth=1, foreground='white'),
                 path_effects.Normal()])
        plt.tight_layout()
    if show:
        plt.show()
def slice_analysis(x, y, z, px, py, pz, q, n_slices=50, len_slice=None,
                   ene_bins=50, left=0.125, right=0.875, top=0.98, bottom=0.13,
                   xlim=None, ylim=None, add_labels=False, include_twiss=False,
                   fig=None, rasterized_scatter=None, show=True):
    """Plot a longitudinal slice analysis of the beam.

    Top panel: z-energy scatter with current profile and energy spectrum
    projections plus summary text. Middle panel: slice energy spread and
    slice emittances. Optional third panel (include_twiss): slice Twiss
    parameters (alpha/beta). Positions in meters, momenta in m_e*c, q is
    the macroparticle charge used as weight.
    """
    # analyze beam
    current_prof, z_edges = bd.current_profile(z, q, n_slices=n_slices,
                                               len_slice=len_slice)
    ene_spectrum, ene_spec_edgs = bd.energy_spectrum(px, py, pz, w=q,
                                                     bins=ene_bins)
    slice_ene, *_ = bd.energy_profile(
        z, px, py, pz, w=q, n_slices=n_slices, len_slice=len_slice)
    slice_ene_sp, *_ = bd.relative_rms_slice_energy_spread(
        z, px, py, pz, w=q, n_slices=n_slices, len_slice=len_slice)
    sl_tw, sl_w, *_ = bd.slice_twiss_parameters(
        z, x, px, pz, w=q, n_slices=n_slices, len_slice=len_slice)
    alpha_x, *_ = get_only_statistically_relevant_slices(
        sl_tw[0], sl_w, replace_with_nans=True)
    beta_x, *_ = get_only_statistically_relevant_slices(
        sl_tw[1], sl_w, replace_with_nans=True)
    sl_tw, *_ = bd.slice_twiss_parameters(
        z, y, py, pz, w=q, n_slices=n_slices, len_slice=len_slice)
    alpha_y, *_ = get_only_statistically_relevant_slices(
        sl_tw[0], sl_w, replace_with_nans=True)
    beta_y, *_ = get_only_statistically_relevant_slices(
        sl_tw[1], sl_w, replace_with_nans=True)
    slice_em_x, *_ = bd.normalized_transverse_rms_slice_emittance(
        z, x, px, w=q, n_slices=n_slices, len_slice=len_slice)
    slice_em_y, *_ = bd.normalized_transverse_rms_slice_emittance(
        z, y, py, w=q, n_slices=n_slices, len_slice=len_slice)
    s_z = bd.rms_length(z, w=q)
    len_fwhm = bd.fwhm_length(z, q, n_slices=n_slices, len_slice=len_slice)
    ene_sp_tot = bd.relative_rms_energy_spread(px, py, pz, w=q)
    # perform operations (unit conversions for display)
    gamma = np.sqrt(1 + px**2 + py**2 + pz**2)
    ene = gamma * ct.m_e*ct.c**2/ct.e * 1e-9  # GeV
    z_center = np.average(z, weights=q)
    dz = z_edges[1] - z_edges[0]
    slice_z = (z_edges[1:] - dz/2 - z_center) * 1e6  # micron
    current_prof = np.abs(current_prof) * 1e-3  # kA
    peak_current = np.nanmax(current_prof)
    s_t = s_z * 1e15/ct.c
    len_fwhm *= 1e15/ct.c  # fs
    slice_ene *= ct.m_e*ct.c**2/ct.e * 1e-9  # GeV
    ene_spec_edgs = ene_spec_edgs[:-1] + (ene_spec_edgs[1]-ene_spec_edgs[0])/2
    ene_spec_edgs *= ct.m_e*ct.c**2/ct.e * 1e-9  # GeV
    slice_ene_sp *= 1e2  # %
    ene_sp_tot *= 1e2  # %
    slice_em_x *= 1e6  # micron
    slice_em_y *= 1e6  # micron
    # pick display units for beta and energy based on their magnitude
    max_beta = np.nanmax(beta_x)
    if max_beta <= 0.1:
        beta_units = 'mm'
        beta_x *= 1e3
        beta_y *= 1e3
    else:
        beta_units = 'm'
    max_ene = np.nanmax(ene)
    if max_ene <= 1:
        ene_units = 'MeV'
        ene *= 1e3
        ene_spec_edgs *= 1e3
    else:
        ene_units = 'GeV'
    ene_mean = np.average(ene, weights=q)
    # make plot
    if include_twiss:
        nrows = 3
        hr = [2.5, 1, 1]
        fh = 3.3
    else:
        nrows = 2
        hr = [2.5, 1]
        fh = 2.5
    if fig is None:
        fig = plt.figure(figsize=(4, fh))
    gs = gridspec.GridSpec(nrows, 2, height_ratios=hr,
                           width_ratios=[1, 0.02], hspace=0.1, wspace=0.05,
                           figure=fig, left=left, right=right,
                           top=top, bottom=bottom)
    leg_frac = 0.25  # space to reserve for legend
    with plt.rc_context(rc_params):
        ax_or = plt.subplot(gs[0])
        pscatt = scatter_histogram((z-z_center)*1e6, ene, bins=300,
                                   weights=np.abs(q)*1e15,
                                   rasterized=rasterized_scatter)
        plt.ylabel('Energy [{}]'.format(ene_units))
        plt.tick_params(axis='x', which='both', labelbottom=False)
        params_text = ('$\\langle E \\rangle = '
                       + '{:0.1f}$ {}\n'.format(ene_mean, ene_units)
                       + '$\\sigma_\\mathrm{E,rel}='
                       + '{:0.1f}$ %\n'.format(ene_sp_tot)
                       + '$I_\\mathrm{peak}='
                       + '{:0.1f}$ kA\n'.format(peak_current)
                       + '$\\sigma_t='
                       + '{:0.1f}$ fs'.format(s_t))
        plt.text(0.98, 0.95, params_text, transform=ax_or.transAxes,
                 fontsize=6, horizontalalignment='right',
                 verticalalignment='top')
        if add_labels:
            plt.text(0.03, 0.05, '(a)', transform=ax_or.transAxes, fontsize=6,
                     horizontalalignment='left', verticalalignment='bottom')
        # widen the default limits to make room for the projections/text
        if xlim is None:
            xlim = list(plt.xlim())
            xlim[0] -= (xlim[1] - xlim[0])/8
            xlim[1] += (xlim[1] - xlim[0])/3
        plt.xlim(xlim)
        if ylim is None:
            ylim = list(plt.ylim())
            ylim[0] -= (ylim[1] - ylim[0])/3
        plt.ylim(ylim)
        # current profile plot
        z_or = ax_or.get_zorder()
        pos = list(ax_or.get_position().bounds)
        pos[3] /= 5
        ax_or.patch.set_alpha(0)
        ax = fig.add_axes(pos)
        ax.set_zorder(z_or-1)
        plt.plot(slice_z, current_prof, c='k', lw=0.5, alpha=0.5)
        plt.fill_between(slice_z, current_prof, facecolor='tab:gray',
                         alpha=0.3)
        ax.spines['left'].set_position('zero')
        ax.spines['left'].set_color('tab:grey')
        ax.tick_params(axis='y', colors='tab:grey', labelsize=6,
                       direction="in", pad=-4)
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.yaxis.set_ticks_position('left')
        ax.xaxis.set_ticks_position('bottom')
        plt.tick_params(axis='x', which='both', labelbottom=False)
        for label in ax.yaxis.get_ticklabels():
            label.set_horizontalalignment('left')
            label.set_verticalalignment('bottom')
        plt.xlim(xlim)
        ylim_c = list(plt.ylim())
        ylim_c[0] = 0
        plt.ylim(ylim_c)
        plt.ylabel('I [kA]', color='tab:gray', fontsize=6)
        # energy profile plot
        pos = list(ax_or.get_position().bounds)
        pos[2] /= 8
        ax = fig.add_axes(pos)
        ax.set_zorder(z_or-1)
        plt.plot(ene_spectrum, ene_spec_edgs, c='k', lw=0.5, alpha=0.5)
        plt.fill_betweenx(ene_spec_edgs, ene_spectrum, facecolor='tab:gray',
                          alpha=0.3)
        plt.gca().axis('off')
        plt.ylim(ylim)
        xlim_e = list(plt.xlim())
        xlim_e[0] = 0
        plt.xlim(xlim_e)
        # colorbar
        ax = plt.subplot(gs[1])
        matplotlib.colorbar.Colorbar(ax, pscatt, label='Q [fC]')
        # slice parameters plot
        plt.subplot(gs[2])
        l1 = plt.plot(slice_z, slice_ene_sp, lw=1, c='tab:green',
                      label='$\\sigma_\\gamma/\\gamma$')
        plt.ylabel('$\\sigma_\\gamma/\\gamma$ [%]')
        if include_twiss:
            plt.tick_params(axis='x', which='both', labelbottom=False)
        else:
            plt.xlabel('$\\Delta z \\ [\\mathrm{\\mu m}]$')
        # make room for legend
        # ylim = list(plt.ylim())
        # ylim[1] += (ylim[1] - ylim[0]) * leg_frac
        plt.xlim(xlim)
        # plt.ylim(ylim)
        ax = plt.twinx()
        l2 = plt.plot(slice_z, slice_em_x, lw=1, c='tab:blue',
                      label='$\\epsilon_{n,x}$')
        l3 = plt.plot(slice_z, slice_em_y, lw=1, c='tab:orange',
                      label='$\\epsilon_{n,y}$')
        plt.ylabel('$\\epsilon_{n} \\ [\\mathrm{\\mu m}]$')
        # make room for legend
        # ylim = list(plt.ylim())
        # ylim[1] += (ylim[1] - ylim[0]) * leg_frac
        # plt.ylim(ylim)
        lines = l1 + l2 + l3
        labels = [line.get_label() for line in lines]
        plt.legend(lines, labels, fontsize=6, frameon=False,
                   loc='center right', borderaxespad=0.3)
        if add_labels:
            plt.text(0.03, 0.05, '(b)', transform=plt.gca().transAxes,
                     fontsize=6, horizontalalignment='left',
                     verticalalignment='bottom')
        if include_twiss:
            plt.subplot(gs[4])
            l1 = plt.plot(slice_z, beta_x, lw=1, c='tab:blue',
                          label='$\\beta_x$')
            l2 = plt.plot(slice_z, beta_y, lw=1, c='tab:orange',
                          label='$\\beta_y$')
            plt.xlabel('$\\Delta z \\ [\\mathrm{\\mu m}]$')
            plt.ylabel('$\\beta$ [{}]'.format(beta_units))
            # make room for legend
            ylim = list(plt.ylim())
            ylim[1] += (ylim[1] - ylim[0]) * leg_frac
            plt.ylim(ylim)
            plt.xlim(xlim)
            plt.twinx()
            l3 = plt.plot(slice_z, alpha_x, lw=1, c='tab:blue', ls='--',
                          label='$\\alpha_x$')
            l4 = plt.plot(slice_z, alpha_y, lw=1, c='tab:orange', ls='--',
                          label='$\\alpha_y$')
            lines = l1 + l2 + l3 + l4
            labels = [line.get_label() for line in lines]
            # make room for legend
            # ylim = list(plt.ylim())
            # ylim[1] += (ylim[1] - ylim[0]) * leg_frac
            # plt.ylim(ylim)
            plt.legend(lines, labels, fontsize=6, ncol=1, frameon=False,
                       loc='center right', borderaxespad=0.3,
                       labelspacing=0.20)
            if add_labels:
                plt.text(0.03, 0.05, '(c)', transform=plt.gca().transAxes,
                         fontsize=6, horizontalalignment='left',
                         verticalalignment='bottom')
            plt.ylabel('$\\alpha$')
    if show:
        plt.show()
def energy_vs_z(
        z, px, py, pz, q, n_slices=50, len_slice=None, ene_bins=50,
        xlim=None, ylim=None, show_text=True, x_proj=True, y_proj=True,
        cbar=True, cbar_width=0.02, left=0.125, right=0.875, top=0.98,
        bottom=0.13, fig=None, rasterized_scatter=None, show=True):
    """Plot the beam energy vs longitudinal position as a weighted scatter,
    with optional current-profile (x_proj) and energy-spectrum (y_proj)
    projections, summary text and a colorbar.

    Positions in meters, momenta in m_e*c, q is the macroparticle charge
    used as statistical weight.
    """
    # analyze beam
    current_prof, z_edges = bd.current_profile(z, q, n_slices=n_slices,
                                               len_slice=len_slice)
    ene_spectrum, ene_spec_edgs = bd.energy_spectrum(px, py, pz, w=q,
                                                     bins=ene_bins)
    s_z = bd.rms_length(z, w=q)
    len_fwhm = bd.fwhm_length(z, q, n_slices=n_slices, len_slice=len_slice)
    ene_sp_tot = bd.relative_rms_energy_spread(px, py, pz, w=q)
    # perform operations (unit conversions for display)
    gamma = np.sqrt(1 + px**2 + py**2 + pz**2)
    ene = gamma * ct.m_e*ct.c**2/ct.e * 1e-9  # GeV
    z_center = np.average(z, weights=q)
    dz = z_edges[1] - z_edges[0]
    slice_z = (z_edges[1:] - dz/2 - z_center) * 1e6  # micron
    current_prof = np.abs(current_prof) * 1e-3  # kA
    peak_current = np.nanmax(current_prof)
    s_t = s_z * 1e15/ct.c
    len_fwhm *= 1e15/ct.c  # fs
    ene_spec_edgs = ene_spec_edgs[:-1] + (ene_spec_edgs[1]-ene_spec_edgs[0])/2
    ene_spec_edgs *= ct.m_e*ct.c**2/ct.e * 1e-9  # GeV
    ene_sp_tot *= 1e2  # %
    # pick MeV vs GeV depending on magnitude
    max_ene = np.nanmax(ene)
    if max_ene <= 1:
        ene_units = 'MeV'
        ene *= 1e3
        ene_spec_edgs *= 1e3
    else:
        ene_units = 'GeV'
    ene_mean = np.average(ene, weights=q)
    # make plot
    if fig is None:
        fig = plt.figure(figsize=(4, 2.5))
    if cbar:
        gs = gridspec.GridSpec(
            1, 2, width_ratios=[1, cbar_width], hspace=0.1, wspace=0.05,
            figure=fig, left=left, right=right, top=top, bottom=bottom)
    else:
        gs = gridspec.GridSpec(
            1, 1, figure=fig, left=left, right=right, top=top, bottom=bottom)
    with plt.rc_context(rc_params):
        ax_or = plt.subplot(gs[0])
        pscatt = scatter_histogram((z-z_center)*1e6, ene, bins=300,
                                   weights=np.abs(q)*1e15,
                                   rasterized=rasterized_scatter)
        plt.xlabel('$\\Delta z \\ [\\mathrm{\\mu m}]$')
        plt.ylabel('Energy [{}]'.format(ene_units))
        if show_text:
            params_text = ('$\\langle E \\rangle = '
                           + '{:0.1f}$ {}\n'.format(ene_mean, ene_units)
                           + '$\\sigma_\\mathrm{E,rel}='
                           + '{:0.1f}$ %\n'.format(ene_sp_tot)
                           + '$I_\\mathrm{peak}='
                           + '{:0.1f}$ kA\n'.format(peak_current)
                           + '$\\sigma_t='
                           + '{:0.1f}$ fs'.format(s_t))
            plt.text(0.98, 0.95, params_text, transform=ax_or.transAxes,
                     fontsize=6, horizontalalignment='right',
                     verticalalignment='top')
        # widen default limits to make room for projections/text
        if xlim is not None:
            plt.xlim(xlim)
        else:
            xlim = list(plt.xlim())
            if y_proj:
                xlim[0] -= (xlim[1] - xlim[0])/8
            if show_text:
                xlim[1] += (xlim[1] - xlim[0])/3
            plt.xlim(xlim)
        if ylim is not None:
            plt.ylim(ylim)
        else:
            ylim = list(plt.ylim())
            if x_proj:
                ylim[0] -= (ylim[1] - ylim[0])/3
            plt.ylim(ylim)
        # current profile plot
        if x_proj:
            z_or = ax_or.get_zorder()
            pos = list(ax_or.get_position().bounds)
            pos[3] /= 5
            ax_or.patch.set_alpha(0)
            ax = fig.add_axes(pos)
            ax.set_zorder(z_or-1)
            plt.plot(slice_z, current_prof, c='k', lw=0.5, alpha=0.5)
            plt.fill_between(
                slice_z, current_prof, facecolor='tab:gray', alpha=0.3)
            ax.spines['left'].set_position('zero')
            ax.spines['left'].set_color('tab:grey')
            ax.tick_params(
                axis='y', colors='tab:grey', labelsize=6, direction="in",
                pad=-4)
            ax.spines['right'].set_color('none')
            ax.spines['top'].set_color('none')
            ax.yaxis.set_ticks_position('left')
            ax.xaxis.set_ticks_position('bottom')
            plt.tick_params(axis='x', which='both', labelbottom=False)
            for label in ax.yaxis.get_ticklabels():
                label.set_horizontalalignment('left')
                label.set_verticalalignment('bottom')
            plt.xlim(xlim)
            ylim_c = list(plt.ylim())
            ylim_c[0] = 0
            plt.ylim(ylim_c)
            plt.ylabel('I [kA]', color='tab:gray', fontsize=6)
        # energy profile plot
        if y_proj:
            z_or = ax_or.get_zorder()
            pos = list(ax_or.get_position().bounds)
            pos[2] /= 8
            ax_or.patch.set_alpha(0)
            ax = fig.add_axes(pos)
            ax.set_zorder(z_or-1)
            plt.plot(ene_spectrum, ene_spec_edgs, c='k', lw=0.5, alpha=0.5)
            plt.fill_betweenx(
                ene_spec_edgs, ene_spectrum, facecolor='tab:gray', alpha=0.3)
            plt.gca().axis('off')
            plt.ylim(ylim)
            xlim_e = list(plt.xlim())
            xlim_e[0] = 0
            plt.xlim(xlim_e)
        # colorbar
        if cbar:
            ax = plt.subplot(gs[1])
            matplotlib.colorbar.Colorbar(ax, pscatt, label='Q [fC]')
    if show:
        plt.show()
def full_phase_space(x, y, z, px, py, pz, q, show=True, **kwargs):
    """Plot the horizontal, vertical and longitudinal phase spaces of the
    beam side by side in a single figure."""
    fig = plt.figure(figsize=(12, 3))
    grid = gridspec.GridSpec(1, 3, figure=fig, wspace=0.55)
    panels = (
        (hor_phase_space, (x, px, q, pz)),
        (ver_phase_space, (y, py, q, pz)),
        (lon_phase_space, (z, pz, q)),
    )
    for i, (plot_fn, data) in enumerate(panels):
        plot_fn(*data, subplot_spec=grid[i], fig=fig, show=False, **kwargs)
    if show:
        plt.show()
def lon_phase_space(
        z, pz, q, beam_info=True, bins=300, **kwargs):
    """Plot the longitudinal phase space (z vs. p_z) of a particle bunch.

    Parameters
    ----------
    z, pz : per-particle longitudinal position and momentum arrays.
    q : per-particle charge array, used as statistical weight.
    beam_info : if True, annotate the plot with peak current, bunch
        duration and energy-spread figures computed from the bunch.
    bins : number of bins, or a (bins_x, bins_y) pair; only the first
        entry is used for the slice analysis.
    **kwargs : forwarded to ``phase_space_plot``.
    """
    if beam_info:
        # analyze beam
        if type(bins) in [tuple, list]:
            bins_x = bins[0]
        else:
            bins_x = bins
        i_peak = bd.peak_current(z, q, n_slices=bins_x) * 1e-3  # kA
        tau_fwhm = bd.fwhm_length(z, q, n_slices=bins_x) * 1e15/ct.c  # fs
        s_t = bd.rms_length(z, w=q) * 1e15/ct.c  # fs
        pz_avg = np.average(pz, weights=q)
        s_pz = weighted_std(pz, weights=q) / pz_avg * 100  # %
        pz_avg *= ct.m_e*ct.c**2/ct.e * 1e-9  # GeV
        if pz_avg < 0.1:
            pz_units = 'MeV/c'
            pz_avg *= 1e3
        else:
            pz_units = 'GeV/c'
        text = (
            '$\\bar{p_z} = $' + '{:0.2f}'.format(np.around(pz_avg, 3))
            + pz_units + '\n'
            + '$\\sigma_{p_z} = $' + '{}'.format(np.around(s_pz, 3))
            + '$\\%$\n'
            + '$I_\\mathrm{peak}=' + '{:0.2f}$ kA\n'.format(i_peak)
            + '$\\sigma_t=' + '{:0.1f}$ fs\n'.format(s_t)
            + '$\\tau_{FWHM}=' + '{:0.1f}$ fs'.format(tau_fwhm)
        )
    else:
        # Bug fix: ``text`` was previously left unbound when
        # ``beam_info=False``, raising NameError at the call below.
        # Mirrors the behavior of hor_phase_space/ver_phase_space.
        text = None
    # Center in z.
    z_avg = np.average(z, weights=q)
    delta_z = z - z_avg
    phase_space_plot(
        x=delta_z * 1e6,
        y=pz,
        w=np.abs(q),
        x_name='\\Delta z',
        y_name='p_z',
        w_name='Q',
        x_units='µm',
        y_units='m_e c',
        w_units='C',
        text=text,
        bins=bins,
        **kwargs
    )
def hor_phase_space(x, px, q, pz=None, beam_info=True, **kwargs):
    """Plot the horizontal phase space (x vs. p_x) of a particle bunch.

    Parameters
    ----------
    x, px : per-particle horizontal position and momentum arrays.
    q : per-particle charge array, used as statistical weight.
    pz : optional longitudinal momentum; when given, Twiss parameters
        (alpha, beta) are added to the annotation.
    beam_info : if True, annotate the plot with emittance and spot size.
    **kwargs : forwarded to ``phase_space_plot``.
    """
    if not beam_info:
        annotation = None
    else:
        emittance = bd.normalized_transverse_rms_emittance(x, px, w=q) * 1e6
        spot_size = bd.rms_size(x, w=q)
        info_lines = [
            '$\\epsilon_{n,x} = $' + str(np.around(emittance, 3))
            + '$\\ \\mathrm{\\mu m}$',
            '$\\sigma_{x} = $' + str(np.around(spot_size * 1e6, 3))
            + '$\\ \\mathrm{\\mu m}$',
        ]
        if pz is not None:
            alpha, beta, _ = bd.twiss_parameters(x, px, pz, w=q)
            # Pick a readable unit for small beta functions.
            if beta <= 0.1:
                beta_units = 'mm'
                beta *= 1e3
            else:
                beta_units = 'm'
            info_lines.append(
                '$\\beta_{x} = $' + str(np.around(beta, 3)) + beta_units)
            info_lines.append(
                '$\\alpha_{x} = $' + str(np.around(alpha, 3)))
        annotation = '\n'.join(info_lines)
    phase_space_plot(
        x=x * 1e6,
        y=px,
        w=np.abs(q),
        x_name='x',
        y_name='p_x',
        w_name='Q',
        x_units='µm',
        y_units='m_e c',
        w_units='C',
        text=annotation,
        **kwargs
    )
def ver_phase_space(y, py, q, pz=None, beam_info=True, **kwargs):
    """Plot the vertical phase space (y vs. p_y) of a particle bunch.

    Parameters
    ----------
    y, py : per-particle vertical position and momentum arrays.
    q : per-particle charge array, used as statistical weight.
    pz : optional longitudinal momentum; when given, Twiss parameters
        (alpha, beta) are added to the annotation.
    beam_info : if True, annotate the plot with emittance and spot size.
    **kwargs : forwarded to ``phase_space_plot``.
    """
    if not beam_info:
        annotation = None
    else:
        emittance = bd.normalized_transverse_rms_emittance(y, py, w=q) * 1e6
        spot_size = bd.rms_size(y, w=q)
        info_lines = [
            '$\\epsilon_{n,y} = $' + str(np.around(emittance, 3))
            + '$\\ \\mathrm{\\mu m}$',
            '$\\sigma_{y} = $' + str(np.around(spot_size * 1e6, 3))
            + '$\\ \\mathrm{\\mu m}$',
        ]
        if pz is not None:
            alpha, beta, _ = bd.twiss_parameters(y, py, pz, w=q)
            # Pick a readable unit for small beta functions.
            if beta <= 0.1:
                beta_units = 'mm'
                beta *= 1e3
            else:
                beta_units = 'm'
            info_lines.append(
                '$\\beta_{y} = $' + str(np.around(beta, 3)) + beta_units)
            info_lines.append(
                '$\\alpha_{y} = $' + str(np.around(alpha, 3)))
        annotation = '\n'.join(info_lines)
    phase_space_plot(
        x=y * 1e6,
        y=py,
        w=np.abs(q),
        x_name='y',
        y_name='p_y',
        w_name='Q',
        x_units='µm',
        y_units='m_e c',
        w_units='C',
        text=annotation,
        **kwargs
    )
def phase_space_plot(
        x, y, w=None, x_name='', y_name='', w_name='',
        x_units='', y_units='', w_units='', x_lim=None, y_lim=None,
        x_projection=True, y_projection=True, projection_space=True,
        bins=300, rasterized=False,
        s=1, cmap='plasma', center_lines=False,
        text=None, cbar=True, cbar_ticks=3, cbar_width=0.05,
        subplot_spec=None, fig=None, tight_layout=False, show=True):
    """Draw a generic 2D phase-space plot: a weighted scatter histogram
    with optional 1D projections, annotation text and a colorbar.

    Parameters
    ----------
    x, y : per-particle coordinate arrays for the two axes.
    w : optional per-particle weights; also drives the colorbar.
    x_name, y_name, w_name : quantity names rendered in math mode.
    x_units, y_units, w_units : unit strings appended to the labels.
    x_lim, y_lim : optional axis ranges passed to the scatter histogram.
    x_projection, y_projection : whether to draw the 1D projections.
    projection_space : if True, pad the axis ranges to make room for them.
    bins : bin count, or a (bins_x, bins_y) pair.
    rasterized : rasterize the scatter artist (smaller vector output).
    s, cmap : marker size and colormap of the scatter histogram.
    center_lines : draw dashed lines through the weighted centroid.
    text : optional annotation placed in the lower-left corner.
    cbar, cbar_ticks, cbar_width : colorbar toggle and geometry.
    subplot_spec, fig : optional existing gridspec cell / figure to use.
    tight_layout : apply a tight layout after drawing.
    show : call ``plt.show()`` at the end.
    """
    # Reserve a second grid column for the colorbar and widen the
    # figure accordingly.
    if cbar:
        n_cols = 2
        width_ratios = [1, cbar_width]
        figsize = (4 * (1 + cbar_width), 4)
    else:
        n_cols = 1
        width_ratios = None
        figsize = (4, 4)
    # NOTE(review): ``rc_params``, ``scatter_histogram``, ``add_projection``,
    # ``add_text`` and ``create_vertical_colorbars`` are presumably defined
    # elsewhere in this module -- confirm before refactoring.
    with plt.rc_context(rc_params):
        if fig is None:
            fig = plt.figure(figsize=figsize)
        if subplot_spec is None:
            grid = gridspec.GridSpec(
                1, n_cols, width_ratios=width_ratios, figure=fig, wspace=0.05)
        else:
            grid = gridspec.GridSpecFromSubplotSpec(
                1, n_cols, subplot_spec, width_ratios=width_ratios,
                wspace=0.05)
        ax = fig.add_subplot(grid[0])
        img = scatter_histogram(
            x, y, bins=bins, weights=w, range=[x_lim, y_lim], s=s,
            cmap=cmap, rasterized=rasterized, ax=ax)
        if center_lines:
            # Dashed crosshair through the weighted centroid.
            ax.axvline(np.average(x, weights=w), ls='--', lw=0.5, c='k')
            ax.axhline(np.average(y, weights=w), ls='--', lw=0.5, c='k')
        # Build axis labels only from the pieces that were provided.
        x_label = ''
        if len(x_name) > 0:
            x_label += '${}$'.format(x_name)
        if len(x_units) > 0:
            x_label += ' [${}$]'.format(x_units)
        y_label = ''
        if len(y_name) > 0:
            y_label += '${}$'.format(y_name)
        if len(y_units) > 0:
            y_label += ' [${}$]'.format(y_units)
        ax.set_xlabel(x_label)
        ax.set_ylabel(y_label)
        # Extend the axis ranges by 1/5 so the projections do not
        # overlap the scatter data.
        if projection_space:
            if x_projection:
                ylim = list(ax.get_ylim())
                ylim[0] -= (ylim[1] - ylim[0])/5
                ax.set_ylim(ylim)
            if y_projection:
                xlim = list(ax.get_xlim())
                xlim[0] -= (xlim[1] - xlim[0])/5
                ax.set_xlim(xlim)
        if type(bins) in [tuple, list]:
            bins_x, bins_y = bins
        else:
            bins_x = bins_y = bins
        if x_projection:
            add_projection(x, bins_x, ax, grid[0], fig)
        if y_projection:
            add_projection(y, bins_y, ax, grid[0], fig, orientation='vertical')
        if text is not None:
            add_text(ax, 0.05, 0.05, text, va='bottom', ha='left')
        # generate colorbar
        if w is not None and cbar:
            cbar_label = ''
            if len(w_name) > 0:
                cbar_label += '${}$'.format(w_name)
            if len(w_units) > 0:
                cbar_label += ' [${}$]'.format(w_units)
            create_vertical_colorbars(
                img, cbar_label, grid[1], fig, n_ticks=cbar_ticks)
        if tight_layout:
            # GridSpecFromSubplotSpec has no tight_layout; fall back to
            # the figure-level call in that case.
            try:
                grid.tight_layout(fig)
            except Exception:
                fig.tight_layout()
        if show:
            plt.show()
/Adafruit_Blinka-8.20.1-py3-none-any.whl/adafruit_blinka/microcontroller/ftdi_mpsse/mpsse/i2c.py | """I2C Class for FTDI MPSSE"""
from adafruit_blinka.microcontroller.ftdi_mpsse.mpsse.pin import Pin
from adafruit_blinka.microcontroller.ftdi_mpsse.mpsse.url import (
get_ft232h_url,
get_ft2232h_url,
)
class I2C:
    """Custom I2C Class for FTDI MPSSE.

    Implements the subset of the CircuitPython ``busio.I2C`` API needed
    by Blinka on top of pyftdi's ``I2cController``.
    """

    MASTER = 0
    SLAVE = 1
    # Current controller mode; set to MASTER in __init__.
    _mode = None

    # pylint: disable=unused-argument
    def __init__(self, i2c_id=None, mode=MASTER, baudrate=None, frequency=400000):
        """Configure the FTDI device as an I2C master.

        :param i2c_id: optional FT2232H interface id; ``None`` selects
            the FT232H URL instead.
        :param mode: must be ``I2C.MASTER`` (slave mode is unsupported).
        :param baudrate: accepted for API compatibility; unused.
        :param frequency: I2C bus frequency in Hz.
        """
        if mode != self.MASTER:
            raise NotImplementedError("Only I2C Master supported!")
        # Bug fix: this previously assigned a throwaway local ``_mode``
        # instead of setting the instance attribute.
        self._mode = self.MASTER

        # change GPIO controller to I2C
        # pylint: disable=import-outside-toplevel
        from pyftdi.i2c import I2cController

        # pylint: enable=import-outside-toplevel
        self._i2c = I2cController()
        if i2c_id is None:
            self._i2c.configure(get_ft232h_url(), frequency=frequency)
        else:
            self._i2c.configure(get_ft2232h_url(i2c_id), frequency=frequency)
        # Expose the remaining MPSSE pins as GPIO.
        Pin.mpsse_gpio = self._i2c.get_gpio()

    def scan(self):
        """Perform an I2C Device Scan; return addresses that acknowledge."""
        return [addr for addr in range(0x79) if self._i2c.poll(addr)]

    def writeto(self, address, buffer, *, start=0, end=None, stop=True):
        """Write ``buffer[start:end]`` to the device at ``address``."""
        # Bug fix: only ``None`` means "to the end of the buffer"; an
        # explicit ``end=0`` previously wrote the whole buffer because 0
        # is falsy.
        end = end if end is not None else len(buffer)
        port = self._i2c.get_port(address)
        port.write(buffer[start:end], relax=stop)

    def readfrom_into(self, address, buffer, *, start=0, end=None, stop=True):
        """Read from the device at ``address`` into ``buffer[start:end]``."""
        end = end if end is not None else len(buffer)
        port = self._i2c.get_port(address)
        result = port.read(len(buffer[start:end]), relax=stop)
        for i, b in enumerate(result):
            buffer[start + i] = b

    # pylint: disable=unused-argument
    def writeto_then_readfrom(
        self,
        address,
        buffer_out,
        buffer_in,
        *,
        out_start=0,
        out_end=None,
        in_start=0,
        in_end=None,
        stop=False,
    ):
        """Write data from buffer_out to an address and then
        read data from an address and into buffer_in
        """
        out_end = out_end if out_end is not None else len(buffer_out)
        in_end = in_end if in_end is not None else len(buffer_in)
        port = self._i2c.get_port(address)
        # A single combined transaction (repeated start between the
        # write and the read phases).
        result = port.exchange(
            buffer_out[out_start:out_end], in_end - in_start, relax=True
        )
        for i, b in enumerate(result):
            buffer_in[in_start + i] = b

    # pylint: enable=unused-argument
/Flask-Ask-0.9.8.tar.gz/Flask-Ask-0.9.8/README.rst |
.. image:: http://flask-ask.readthedocs.io/en/latest/_images/logo-full.png
===================================
Program the Amazon Echo with Python
===================================
Flask-Ask is a `Flask extension <http://flask.pocoo.org/extensions/>`_ that makes building Alexa skills for the Amazon Echo easier and much more fun.
* `Flask-Ask quickstart on Amazon's Developer Blog <https://developer.amazon.com/public/community/post/Tx14R0IYYGH3SKT/Flask-Ask-A-New-Python-Framework-for-Rapid-Alexa-Skills-Kit-Development>`_.
* `Level Up with our Alexa Skills Kit Video Tutorial <https://alexatutorial.com/>`_
* `Chat on Gitter.im <https://gitter.im/johnwheeler/flask-ask/>`_
The Basics
===============
A Flask-Ask application looks like this:
.. code-block:: python
from flask import Flask
from flask_ask import Ask, statement
app = Flask(__name__)
ask = Ask(app, '/')
@ask.intent('HelloIntent')
def hello(firstname):
speech_text = "Hello %s" % firstname
return statement(speech_text).simple_card('Hello', speech_text)
if __name__ == '__main__':
app.run()
In the code above:
#. The ``Ask`` object is created by passing in the Flask application and a route to forward Alexa requests to.
#. The ``intent`` decorator maps ``HelloIntent`` to a view function ``hello``.
#. The intent's ``firstname`` slot is implicitly mapped to ``hello``'s ``firstname`` parameter.
#. Lastly, a builder constructs a spoken response and displays a contextual card in the Alexa smartphone/tablet app.
More code examples are in the `samples <https://github.com/johnwheeler/flask-ask/tree/master/samples>`_ directory.
Jinja Templates
---------------
Since Alexa responses are usually short phrases, you might find it convenient to put them in the same file.
Flask-Ask has a `Jinja template loader <http://jinja.pocoo.org/docs/dev/api/#loaders>`_ that loads
multiple templates from a single YAML file. For example, here's a template that supports the minimal voice interface
above:
.. code-block:: yaml
hello: Hello, {{ firstname }}
Templates are stored in a file called `templates.yaml` located in the application root. Checkout the `Tidepooler example <https://github.com/johnwheeler/flask-ask/tree/master/samples/tidepooler>`_ to see why it makes sense to extract speech out of the code and into templates as the number of spoken phrases grow.
Features
===============
Flask-Ask handles the boilerplate, so you can focus on writing clean code. Flask-Ask:
* Has decorators to map Alexa requests and intent slots to view functions
* Helps construct ask and tell responses, reprompts and cards
* Makes session management easy
* Allows for the separation of code and speech through Jinja templates
* Verifies Alexa request signatures
Installation
===============
To install Flask-Ask::
pip install flask-ask
Documentation
===============
These resources will get you up and running quickly:
* `5-minute quickstart <https://www.youtube.com/watch?v=cXL8FDUag-s>`_
* `Full online documentation <https://alexatutorial.com/flask-ask/>`_
Fantastic 3-part tutorial series by Harrison Kinsley
* `Intro and Skill Logic - Alexa Skills w/ Python and Flask-Ask Part 1 <https://pythonprogramming.net/intro-alexa-skill-flask-ask-python-tutorial/>`_
* `Headlines Function - Alexa Skills w/ Python and Flask-Ask Part 2 <https://pythonprogramming.net/headlines-function-alexa-skill-flask-ask-python-tutorial/>`_
* `Testing our Skill - Alexa Skills w/ Python and Flask-Ask Part 3 <https://pythonprogramming.net/testing-deploying-alexa-skill-flask-ask-python-tutorial/>`_
Deployment
===============
You can deploy using any WSGI compliant framework (uWSGI, Gunicorn). If you haven't deployed a Flask app to production, `check out flask-live-starter <https://github.com/johnwheeler/flask-live-starter>`_.
To deploy on AWS Lambda, you have two options. Use `Zappa <https://github.com/Miserlou/Zappa>`_ to automate the deployment of an AWS Lambda function and an AWS API Gateway to provide a public facing endpoint for your Lambda function. This `blog post <https://developer.amazon.com/blogs/post/8e8ad73a-99e9-4c0f-a7b3-60f92287b0bf/new-alexa-tutorial-deploy-flask-ask-skills-to-aws-lambda-with-zappa>`_ shows how to deploy Flask-Ask with Zappa from scratch. Note: When deploying to AWS Lambda with Zappa, make sure you point the Alexa skill to the HTTPS API gateway that Zappa creates, not the Lambda function's ARN.
Alternatively, you can use AWS Lambda directly without the need for an AWS API Gateway endpoint. In this case you will need to `deploy <https://developer.amazon.com/docs/custom-skills/host-a-custom-skill-as-an-aws-lambda-function.html>`_ your Lambda function yourself and use `virtualenv <http://docs.aws.amazon.com/lambda/latest/dg/lambda-python-how-to-create-deployment-package.html>`_ to create a deployment package that contains your Flask-Ask application along with its dependencies, which can be uploaded to Lambda. If your Lambda handler is configured as `lambda_function.lambda_handler`, then you would save the full application example above in a file called `lambda_function.py` and add the following two lines to it:
.. code-block:: python
def lambda_handler(event, _context):
return ask.run_aws_lambda(event)
Development
===============
If you'd like to work from the Flask-Ask source, clone the project and run::
pip install -r requirements-dev.txt
This will install all base requirements from `requirements.txt` as well as requirements needed for running tests from the `tests` directory.
Tests can be run with::
python setup.py test
Or::
python -m unittest
To install from your local clone or fork of the project, run::
python setup.py install
Related projects
================
`cookiecutter-flask-ask <https://github.com/chrisvoncsefalvay/cookiecutter-flask-ask>`_ is a Cookiecutter to easily bootstrap a Flask-Ask project, including documentation, speech assets and basic built-in intents.
Have a Google Home? Check out `Flask-Assistant <https://github.com/treethought/flask-assistant>`_ (early alpha)
Thank You
===============
Thanks for checking this library out! I hope you find it useful.
Of course, there's always room for improvement.
Feel free to `open an issue <https://github.com/johnwheeler/flask-ask/issues>`_ so we can make Flask-Ask better.
Special thanks to `@kennethreitz <https://github.com/kennethreitz>`_ for his `sense <http://docs.python-requests.org/en/master/>`_ of `style <https://github.com/kennethreitz/records/blob/master/README.rst>`_, and of course, `@mitsuhiko <https://github.com/mitsuhiko>`_ for `Flask <https://www.palletsprojects.com/p/flask/>`_
| PypiClean |
/DBUtils-3.0.3.tar.gz/DBUtils-3.0.3/README.md | DBUtils
=======
DBUtils is a suite of tools providing solid, persistent and pooled connections
to a database that can be used in all kinds of multi-threaded environments.
The suite supports DB-API 2 compliant database interfaces
and the classic PyGreSQL interface.
The current version 3.0.3 of DBUtils supports Python versions 3.6 to 3.11.
**Please have a look at the [changelog](https://webwareforpython.github.io/DBUtils/changelog.html), because there were some breaking changes in version 2.0.**
The DBUtils home page can be found at https://webwareforpython.github.io/DBUtils/
| PypiClean |
/BetterTablesExtension-0.0.2.tar.gz/BetterTablesExtension-0.0.2/better_tables.py | from __future__ import absolute_import
from __future__ import unicode_literals
from markdown import Extension
from markdown.blockprocessors import BlockProcessor
from markdown.util import etree
class BetterTableProcessor(BlockProcessor):
    """Block processor that turns pipe-delimited text blocks into tables."""

    def test(self, parent, block):
        """Return whether ``block`` looks like a markdown table.

        A table needs at least a header line, a separator line containing
        pipes and dashes, and one body row.
        """
        lines = block.split('\n')
        if len(lines) <= 2:
            return False
        return ('|' in lines[0] and '|' in lines[1] and '-' in lines[1] and
                lines[1].strip()[0] in ['|', ':', '-'])

    def run(self, parent, blocks):
        """Parse a table block and build table."""
        lines = blocks.pop(0).split('\n')
        header = lines[0].strip()
        separator = lines[1].strip()
        body_rows = lines[2:]

        # Tables whose header starts with a pipe are rendered bordered.
        bordered = header.startswith('|')

        # Derive per-column alignment from the separator cells.
        alignments = []
        for cell in self._split_row(separator, bordered):
            starts_colon = cell.startswith(':')
            ends_colon = cell.endswith(':')
            if starts_colon and ends_colon:
                alignments.append('center')
            elif starts_colon:
                alignments.append('left')
            elif ends_colon:
                alignments.append('right')
            else:
                alignments.append(None)

        # Assemble the table element tree.
        table = etree.SubElement(parent, 'table')
        if bordered:
            table.set('border', '1')
        thead = etree.SubElement(table, 'thead')
        self._build_row(header, thead, alignments, bordered)
        tbody = etree.SubElement(table, 'tbody')
        for body_row in body_rows:
            self._build_row(body_row.strip(), tbody, alignments, bordered)

    def _build_row(self, row, parent, align, border):
        """Append a <tr> of cells built from ``row`` to ``parent``."""
        tr = etree.SubElement(parent, 'tr')
        cell_tag = 'th' if parent.tag == 'thead' else 'td'
        cells = self._split_row(row, border)
        # Iterate over ``align`` rather than ``cells`` so that every row
        # ends up with the same number of columns as the separator line.
        for idx, alignment in enumerate(align):
            cell = etree.SubElement(tr, cell_tag)
            try:
                cell.text = cells[idx].strip()
            except IndexError:  # pragma: no cover
                cell.text = ""
            if alignment:
                cell.set('align', alignment)

    def _split_row(self, row, border):
        """Split a row of text into a list of cell strings."""
        if border:
            if row.startswith('|'):
                row = row[1:]
            if row.endswith('|'):
                row = row[:-1]
        return row.split('|')
class BetterTablesExtension(Extension):
    """Markdown extension that registers the better-tables processor."""

    def extendMarkdown(self, md, md_globals):
        """Hook a BetterTableProcessor instance into the block parser."""
        processor = BetterTableProcessor(md.parser)
        md.parser.blockprocessors.add('table', processor, '<hashheader')
def makeExtension(*args, **kwargs):
    """Entry point used by Python-Markdown to instantiate the extension."""
    extension = BetterTablesExtension(*args, **kwargs)
    return extension
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Scanner/__init__.py | import re
import SCons.Node.FS
import SCons.PathList
import SCons.Util
class _Null:
    """Sentinel type used to distinguish "argument not given" from ``None``."""
    pass

# This is used instead of None as a default argument value so None can be
# used as an actual argument value.
_null = _Null
def Scanner(function, *args, **kwargs):
    """Factory function to create a Scanner Object.

    Dispatches on the type of *function*: a dictionary produces a
    :class:`Selector`, anything else a plain :class:`ScannerBase`.

    TODO: Deprecate this some day.  The functionality now lives inside
    the ScannerBase class, so this factory is no longer needed.  It is
    kept because some Tool modules used it, and third-party modules
    patterned on SCons code likely still call it.
    """
    scanner_class = Selector if SCons.Util.is_Dict(function) else ScannerBase
    return scanner_class(function, *args, **kwargs)
class FindPathDirs:
    """Callable binding a specific E{*}PATH variable name to a function
    that returns all of the corresponding E{*}path directories."""

    def __init__(self, variable):
        self.variable = variable

    def __call__(self, env, dir=None, target=None, source=None, argument=None):
        # A missing construction variable simply means "no search path".
        try:
            raw_path = env[self.variable]
        except KeyError:
            return ()

        search_dir = dir or env.fs._cwd
        expanded = SCons.PathList.PathList(raw_path).subst_path(
            env, target, source)
        return tuple(search_dir.Rfindalldirs(expanded))
class ScannerBase:
    """Base class for dependency scanners.

    Implements straightforward, single-pass scanning of a single file.

    A Scanner is usually set up with a scanner function (and optionally
    a path function), but can also be a kind of dispatcher which
    passes control to other Scanners.

    A scanner function takes three arguments: a Node to scan for
    dependencies, the construction environment to use, and an optional
    tuple of paths (as generated by the optional path function).
    It must return a list containing the Nodes for all the direct
    dependencies of the file.

    The optional path function is called to return paths that can be
    searched for implicit dependency files.  It takes five arguments:
    a construction environment, a Node for the directory containing
    the SConscript file that defined the primary target, a list of
    target nodes, a list of source nodes, and the optional argument
    for this instance.

    Examples::

        s = Scanner(my_scanner_function)

        s = Scanner(function=my_scanner_function)

        s = Scanner(function=my_scanner_function, argument='foo')

    Args:
        function: either a scanner function taking two or three arguments
            and returning a list of File Nodes; or a mapping of keys to
            other Scanner objects.

        name: an optional name for identifying this scanner object
            (defaults to "NONE").

        argument: an optional argument that will be passed to both
            *function* and *path_function*.

        skeys: an optional list argument that can be used
            to determine if this scanner can be used for a given Node.
            In the case of File nodes, for example, the *skeys*
            would be file suffixes.

        path_function: an optional function which returns a tuple
            of the directories that can be searched for implicit
            dependency files.  May also return a callable which
            is called with no args and returns the tuple (supporting
            Bindable class).

        node_class: optional class of Nodes which this scan will return.
            If not specified, defaults to :class:`SCons.Node.FS.Base`.
            If *node_class* is ``None``, then this scanner will not enforce
            any Node conversion and will return the raw results from
            *function*.

        node_factory: optional factory function to be called to
            translate the raw results returned by *function*
            into the expected *node_class* objects.

        scan_check: optional function to be called to first check whether
            this node really needs to be scanned.

        recursive: optional specifier of whether this scanner should be
            invoked recursively on all of the implicit dependencies it
            returns (for example `#include` lines in C source files, which
            may refer to header files which should themselves be scanned).
            May be a callable, which will be called to filter
            the list of nodes found to select a subset for recursive
            scanning (the canonical example being only recursively
            scanning subdirectories within a directory).  The default
            is to not do recursive scanning.
    """

    def __init__(
        self,
        function,
        name="NONE",
        argument=_null,
        skeys=_null,
        path_function=None,
        # Node.FS.Base so that, by default, it's okay for a
        # scanner to return a Dir, File or Entry.
        node_class=SCons.Node.FS.Base,
        node_factory=None,
        scan_check=None,
        recursive=None,
    ):
        """Construct a new scanner object given a scanner function."""
        # Note: this class could easily work with scanner functions that take
        # something other than a filename as an argument (e.g. a database
        # node) and a dependencies list that aren't file names. All that
        # would need to be changed is the documentation.

        self.function = function
        self.path_function = path_function
        self.name = name
        self.argument = argument

        # With no explicit skeys, a dispatching (dict) function implies
        # the dict's keys; a plain function implies no keys at all.
        if skeys is _null:
            if SCons.Util.is_Dict(function):
                skeys = list(function.keys())
            else:
                skeys = []
        self.skeys = skeys

        self.node_class = node_class
        self.node_factory = node_factory
        self.scan_check = scan_check
        # ``recursive`` may be a filter callable, a truthy flag (recurse
        # into everything), or falsy/None (no recursion).
        if callable(recursive):
            self.recurse_nodes = recursive
        elif recursive:
            self.recurse_nodes = self._recurse_all_nodes
        else:
            self.recurse_nodes = self._recurse_no_nodes

    def path(self, env, dir=None, target=None, source=None):
        """Return the tuple of search directories from the path function,
        or an empty tuple if no path function was configured."""
        if not self.path_function:
            return ()

        if self.argument is not _null:
            return self.path_function(env, dir, target, source, self.argument)
        return self.path_function(env, dir, target, source)

    def __call__(self, node, env, path=()) -> list:
        """Scans a single object.

        Args:
            node: the node that will be passed to the scanner function
            env: the environment that will be passed to the scanner function.
            path: tuple of paths from the `path_function`

        Returns:
            A list of direct dependency nodes for the specified node.
        """
        if self.scan_check and not self.scan_check(node, env):
            return []

        # here we may morph into a different Scanner instance:
        self = self.select(node)  # pylint: disable=self-cls-assignment

        if self.argument is not _null:
            node_list = self.function(node, env, path, self.argument)
        else:
            node_list = self.function(node, env, path)

        kw = {}
        if hasattr(node, 'dir'):
            kw['directory'] = node.dir
        # Convert raw scan results into node_class instances unless the
        # scanner was configured with node_class=None.
        conv = env.get_factory(self.node_factory)
        cls = self.node_class
        nl = [conv(n, **kw) if cls and not isinstance(n, cls) else n for n in node_list]
        return nl

    def __eq__(self, other):
        # Compare by attribute dict; fall back to direct comparison for
        # objects without a __dict__.
        try:
            return self.__dict__ == other.__dict__
        except AttributeError:
            # other probably doesn't have a __dict__
            return self.__dict__ == other

    def __hash__(self):
        # Identity hash: scanners hash by object identity even though
        # equality is value-based.
        return id(self)

    def __str__(self):
        return self.name

    def add_skey(self, skey):
        """Add a skey to the list of skeys"""
        self.skeys.append(skey)

    def get_skeys(self, env=None):
        """Return the scanner keys, substituting through ``env`` when the
        keys are stored as a single construction-variable string."""
        if env and SCons.Util.is_String(self.skeys):
            return env.subst_list(self.skeys)[0]
        return self.skeys

    def select(self, node):
        """Return the scanner to use for ``node``: a dict-valued function
        dispatches on the node's scanner key, otherwise use self."""
        if SCons.Util.is_Dict(self.function):
            key = node.scanner_key()
            try:
                return self.function[key]
            except KeyError:
                return None
        else:
            return self

    @staticmethod
    def _recurse_all_nodes(nodes):
        # recursive=True: recurse into every found node.
        return nodes

    @staticmethod
    def _recurse_no_nodes(nodes):
        # recursive falsy/None: never recurse.
        return []

    # recurse_nodes = _recurse_no_nodes

    def add_scanner(self, skey, scanner):
        """Register ``scanner`` for dispatch key ``skey`` (dict-valued
        function only)."""
        self.function[skey] = scanner
        self.add_skey(skey)
# Keep the old name for backward compatibility in case external users are
# still importing it; there are no remaining internal uses of the name "Base".
Base = ScannerBase
class Selector(ScannerBase):
    """Scanner that dispatches to a more specific scanner chosen by the
    node's :func:`scanner_key` (typically its file suffix).

    TODO: This functionality has been folded into the inner workings of
    ScannerBase, so this class will be deprecated at some point.  It was
    never exposed directly as part of the public interface, but it is
    used by the :func:`Scanner` factory function that various Tool
    modules relied on, and custom modules patterned on them may still
    depend on it.
    """

    def __init__(self, mapping, *args, **kwargs):
        super().__init__(None, *args, **kwargs)
        self.mapping = mapping
        self.skeys = list(mapping.keys())

    def __call__(self, node, env, path=()):
        scanner = self.select(node)
        return scanner(node, env, path)

    def select(self, node):
        # An unknown scanner key means "no scanner for this node".
        return self.mapping.get(node.scanner_key())

    def add_scanner(self, skey, scanner):
        self.mapping[skey] = scanner
        self.add_skey(skey)
class Current(ScannerBase):
    """Scanner restricted to nodes whose contents can be trusted: source
    files (which have no builder) and derived files that are current
    (which implies that they exist, either locally or in a repository)."""

    def __init__(self, *args, **kwargs):
        # Force the scan_check to the "is source or up to date" predicate.
        kwargs['scan_check'] = self._is_current
        super().__init__(*args, **kwargs)

    @staticmethod
    def _is_current(node, env):
        return not node.has_builder() or node.is_up_to_date()
class Classic(Current):
    """
    A Scanner subclass to contain the common logic for classic CPP-style
    include scanning, but which can be customized to use different
    regular expressions to find the includes.

    Note that in order for this to work "out of the box" (without
    overriding the :meth:`find_include` and :meth:`sort_key` methods),
    the regular expression passed to the constructor must return the
    name of the include file in group 0.
    """

    def __init__(self, name, suffixes, path_variable, regex, *args, **kwargs):
        self.cre = re.compile(regex, re.M)

        # Scanner function: only scan files that actually exist (after
        # Repository resolution via rfile()).
        def _scan(node, _, path=(), self=self):
            node = node.rfile()
            if not node.exists():
                return []
            return self.scan(node, path)

        kwargs['function'] = _scan
        kwargs['path_function'] = FindPathDirs(path_variable)

        # Allow recursive to propagate if child class specifies.
        # In this case resource scanner needs to specify a filter on which files
        # get recursively processed.  Previously was hardcoded to 1 instead of
        # defaulted to 1.
        kwargs['recursive'] = kwargs.get('recursive', True)
        kwargs['skeys'] = suffixes
        kwargs['name'] = name

        super().__init__(*args, **kwargs)

    @staticmethod
    def find_include(include, source_dir, path):
        """Resolve ``include`` against the source directory and search path."""
        n = SCons.Node.FS.find_file(include, (source_dir,) + tuple(path))
        return n, include

    @staticmethod
    def sort_key(include):
        """Normalized-case sort key so scan order is platform-stable."""
        return SCons.Node.FS._my_normcase(include)

    def find_include_names(self, node):
        """Return all include names matched by the scanner's regex."""
        return self.cre.findall(node.get_text_contents())

    def scan(self, node, path=()):
        # cache the includes list in node so we only scan it once:
        if node.includes is not None:
            includes = node.includes
        else:
            includes = self.find_include_names(node)
            # Intern the names of the include files. Saves some memory
            # if the same header is included many times.
            node.includes = list(map(SCons.Util.silent_intern, includes))

        # This is a hand-coded DSU (decorate-sort-undecorate, or
        # Schwartzian transform) pattern.  The sort key is the raw name
        # of the file as specified on the #include line (including the
        # " or <, since that may affect what file is found), which lets
        # us keep the sort order constant regardless of whether the file
        # is actually found in a Repository or locally.
        nodes = []
        source_dir = node.get_dir()
        # Support a Bindable-style path: a callable returning the tuple.
        if callable(path):
            path = path()

        for include in includes:
            n, i = self.find_include(include, source_dir, path)
            if n is None:
                # NOTE(review): SCons.Warnings is not imported at the top of
                # this module; it appears to rely on being pulled in
                # transitively by the SCons.Node.FS import -- confirm.
                SCons.Warnings.warn(
                    SCons.Warnings.DependencyWarning,
                    "No dependency generated for file: %s "
                    "(included from: %s) -- file not found" % (i, node),
                )
            else:
                nodes.append((self.sort_key(include), n))

        return [pair[1] for pair in sorted(nodes)]
class ClassicCPP(Classic):
    """Classic scanner honoring CPP semantics for the two include forms:
    ``"file"`` searches the including file's directory first, while
    ``<file>`` searches it last.

    The regular expression passed to the constructor must capture the
    leading bracket in group 0 and the contained filename in group 1.
    """

    def find_include(self, include, source_dir, path):
        parts = [SCons.Util.to_str(piece) for piece in include]
        # Quoted includes prefer the source directory; bracketed ones
        # fall back to it only after the search path.
        if parts[0] == '"':
            search_dirs = (source_dir,) + tuple(path)
        else:
            search_dirs = tuple(path) + (source_dir,)

        found = SCons.Node.FS.find_file(parts[1], search_dirs)
        return found, SCons.Util.silent_intern(parts[1])

    def sort_key(self, include):
        return SCons.Node.FS._my_normcase(' '.join(include))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
/BayesASE-21.1.13.1.tar.gz/BayesASE-21.1.13.1/scripts/check_comparate_design_file.py | import argparse
import os
import pandas as pd
def get_args():
    """Parse and return the command-line arguments."""
    arg_parser = argparse.ArgumentParser(description="""Check user-supplied Pre-Bayesian design file for
    correct formatting and adherence to BASE guidelines""")
    arg_parser.add_argument(
        '-design', '--design', dest='design', action='store', required=True,
        help='Input Design File. See BASE User Guide for formatting help [REQUIRED]')
    arg_parser.add_argument(
        '-compNum', '--compNum', dest='compNum', type=int, action='store',
        required=True, help='Number of comparates')
    arg_parser.add_argument(
        '-o', '--out', dest='out', action='store', required=True,
        help='Name of log file that checks design file')
    return arg_parser.parse_args()
def err_msg(col):
    """Return a tab-indented error line for a missing or mislabeled column."""
    template = "\tError: column '{}' does not exist or is mislabeled in design file\n"
    return template.format(col)
def main():
    """Validate the comparate design file's column names and order.

    Reads the design file named on the command line, compares its header
    row against the expected column layout for the requested number of
    comparates, and writes a log file describing any mismatches.
    """
    args = get_args()
    design = pd.read_csv(args.design, sep='\t', index_col=None)
    observed_columns = design.columns.tolist()
    in_file = os.path.split(args.design)[1]
    general_success_msg = f'\tThe columns are labeled correctly and are in the correct order\n'
    general_error_msg = f"""\tError: Design file format does not align with BASE requirements.
    {' '*6}\tColumn names are either incorrectly labeled, missing, or out of order\n"""
    # Map short column ids to the exact header names BASE expects.
    column_names = {'g1': 'C1_G1', 'g2': 'C1_G2', 'c1': 'C2_G1', 'c2': 'C2_G2',
                    'comparate_1': 'Comparate_1', 'comparate_2': 'Comparate_2',
                    'compID': 'compID'}
    # Required column layout depends on the number of comparates (1 or 2).
    fixed_column_ids = {1: ['g1', 'g2', 'comparate_1', 'compID'],
                        2: ['g1', 'g2', 'c1', 'c2', 'comparate_1',
                            'comparate_2', 'compID']}
    expected_columns = [
        column_names[key] for key in fixed_column_ids[args.compNum]]

    with open(args.out, 'w') as outfile:
        outfile.write(f'Design_file_name: {in_file}\n')
        if observed_columns == expected_columns:
            outfile.write(general_success_msg)
        else:
            outfile.write(general_error_msg)
            # Report each expected column that is missing entirely.
            for expected in expected_columns:
                if expected not in observed_columns:
                    outfile.write(err_msg(expected))
# Script entry point: run the design-file check when invoked directly.
if __name__ == '__main__':
    main()
/DI-treetensor-0.4.1.tar.gz/DI-treetensor-0.4.1/treetensor/torch/funcs/comparison.py | import builtins
import torch
from .base import doc_from_base, func_treelize
from ..stream import stream_call
from ...common import ireduce
# Public API of this module: whole-tree and element-wise comparison helpers
# mirroring their ``torch`` counterparts.
__all__ = [
    'equal',
    'isfinite', 'isinf', 'isnan', 'isclose',
    'eq', 'ne', 'lt', 'le', 'gt', 'ge',
]
# noinspection PyShadowingBuiltins
@doc_from_base()  # presumably attaches docs from the torch base -- see .base
@ireduce(builtins.all)  # fold the tree of per-leaf bools into a single bool
@func_treelize()  # apply torch.equal leaf-by-leaf over the tree structure
def equal(input, other):
    """
    In ``treetensor``, you can get the equality of the two tree tensors.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.equal(
        ...     torch.tensor([1, 2, 3]),
        ...     torch.tensor([1, 2, 3]),
        ... )  # the same as torch.equal
        True

        >>> ttorch.equal(
        ...     ttorch.tensor({
        ...         'a': torch.tensor([1, 2, 3]),
        ...         'b': torch.tensor([[4, 5], [6, 7]]),
        ...     }),
        ...     ttorch.tensor({
        ...         'a': torch.tensor([1, 2, 3]),
        ...         'b': torch.tensor([[4, 5], [6, 7]]),
        ...     }),
        ... )
        True
    """
    # Delegate to torch.equal through stream_call (see ..stream).
    return stream_call(torch.equal, input, other)
# noinspection PyShadowingBuiltins
@doc_from_base()  # presumably attaches docs from the torch base -- see .base
@func_treelize()  # apply torch.isfinite leaf-by-leaf over the tree structure
def isfinite(input):
    """
    In ``treetensor``, you can get a tree of new tensors with boolean elements
    representing if each element is `finite` or not.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
        tensor([ True, False,  True, False, False])

        >>> ttorch.isfinite(ttorch.tensor({
        ...     'a': [1, float('inf'), 2, float('-inf'), float('nan')],
        ...     'b': {'x': [[1, float('inf'), -2], [float('-inf'), 3, float('nan')]]}
        ... }))
        <Tensor 0x7fb782a15970>
        ├── a --> tensor([ True, False,  True, False, False])
        └── b --> <Tensor 0x7fb782a1e040>
            └── x --> tensor([[ True, False,  True],
                              [False,  True, False]])
    """
    # Delegate to torch.isfinite through stream_call (see ..stream).
    return stream_call(torch.isfinite, input)
# noinspection PyShadowingBuiltins
@doc_from_base()
@func_treelize()
def isinf(input):
    """
    Leaf-wise ``torch.isinf``: flags elements that are positive or negative
    infinity, returning the masks in the same tree structure.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
        tensor([False,  True, False,  True, False])

        >>> ttorch.isinf(ttorch.tensor({
        ...     'a': [1, float('inf'), 2, float('-inf'), float('nan')],
        ...     'b': {'x': [[1, float('inf'), -2], [float('-inf'), 3, float('nan')]]}
        ... }))
        <Tensor 0x7fb782a29b80>
        ├── a --> tensor([False,  True, False,  True, False])
        └── b --> <Tensor 0x7fb782a2d1f0>
            └── x --> tensor([[False,  True, False],
                              [ True, False, False]])
    """
    inf_mask = stream_call(torch.isinf, input)
    return inf_mask
# noinspection PyShadowingBuiltins
@doc_from_base()
@func_treelize()
def isnan(input):
    """
    Leaf-wise ``torch.isnan``: returns a tree of boolean tensors marking
    which elements of ``input`` are NaN.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.isnan(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
        tensor([False, False, False, False,  True])

        >>> ttorch.isnan(ttorch.tensor({
        ...     'a': [1, float('inf'), 2, float('-inf'), float('nan')],
        ...     'b': {'x': [[1, float('inf'), -2], [float('-inf'), 3, float('nan')]]}
        ... }))
        <Tensor 0x7fb782a2d0a0>
        ├── a --> tensor([False, False, False, False,  True])
        └── b --> <Tensor 0x7fb782a29d90>
            └── x --> tensor([[False, False, False],
                              [False, False,  True]])
    """
    nan_mask = stream_call(torch.isnan, input)
    return nan_mask
# noinspection PyShadowingBuiltins
@doc_from_base()
@func_treelize()
def isclose(input, other, *args, **kwargs):
    """
    Leaf-wise ``torch.isclose``: boolean masks telling whether each element of
    ``input`` is "close" to the corresponding element of ``other``; extra
    positional/keyword arguments (``rtol``, ``atol``, ...) are forwarded.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> import math
        >>> ttorch.isclose(
        ...     ttorch.tensor((1., 2, 3)),
        ...     ttorch.tensor((1 + 1e-10, 3, 4))
        ... )
        tensor([ True, False, False])

        >>> ttorch.isclose(
        ...     ttorch.tensor({
        ...         'a': [1., 2, 3],
        ...         'b': {'x': [[float('inf'), 4, 1e20],
        ...                     [-math.inf, 2.2943, 9483.32]]},
        ...     }),
        ...     ttorch.tensor({
        ...         'a': [1 + 1e-10, 3, 4],
        ...         'b': {'x': [[math.inf, 6, 1e20+1],
        ...                     [-float('inf'), 2.294300000001, 9484.32]]},
        ...     }),
        ... )
        <Tensor 0x7f5b3219f370>
        ├── a --> tensor([ True, False, False])
        └── b --> <Tensor 0x7f5b3219f550>
            └── x --> tensor([[ True, False,  True],
                              [ True,  True, False]])
    """
    close_mask = stream_call(torch.isclose, input, other, *args, **kwargs)
    return close_mask
# noinspection PyShadowingBuiltins
@doc_from_base()
@func_treelize()
def eq(input, other, *args, **kwargs):
    """
    Element-wise equality (``torch.eq``) applied leaf by leaf across two
    tree tensors; the boolean result keeps the tree structure.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.eq(
        ...     torch.tensor([[1, 2], [3, 4]]),
        ...     torch.tensor([[1, 1], [4, 4]]),
        ... )
        tensor([[ True, False],
                [False,  True]])

        >>> ttorch.eq(
        ...     ttorch.tensor({
        ...         'a': [[1, 2], [3, 4]],
        ...         'b': [1.0, 1.5, 2.0],
        ...     }),
        ...     ttorch.tensor({
        ...         'a': [[1, 1], [4, 4]],
        ...         'b': [1.3, 1.2, 2.0],
        ...     }),
        ... )
        <Tensor 0x7ff363bbce10>
        ├── a --> tensor([[ True, False],
        │                 [False,  True]])
        └── b --> tensor([False, False,  True])
    """
    cmp_tree = stream_call(torch.eq, input, other, *args, **kwargs)
    return cmp_tree
# noinspection PyShadowingBuiltins
@doc_from_base()
@func_treelize()
def ne(input, other, *args, **kwargs):
    """
    Element-wise inequality (``torch.ne``) applied leaf by leaf across two
    tree tensors; the boolean result keeps the tree structure.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.ne(
        ...     torch.tensor([[1, 2], [3, 4]]),
        ...     torch.tensor([[1, 1], [4, 4]]),
        ... )
        tensor([[False,  True],
                [ True, False]])

        >>> ttorch.ne(
        ...     ttorch.tensor({
        ...         'a': [[1, 2], [3, 4]],
        ...         'b': [1.0, 1.5, 2.0],
        ...     }),
        ...     ttorch.tensor({
        ...         'a': [[1, 1], [4, 4]],
        ...         'b': [1.3, 1.2, 2.0],
        ...     }),
        ... )
        <Tensor 0x7ff363bb6cf8>
        ├── a --> tensor([[False,  True],
        │                 [ True, False]])
        └── b --> tensor([ True,  True, False])
    """
    cmp_tree = stream_call(torch.ne, input, other, *args, **kwargs)
    return cmp_tree
# noinspection PyShadowingBuiltins
@doc_from_base()
@func_treelize()
def lt(input, other, *args, **kwargs):
    """
    Element-wise less-than (``torch.lt``) applied leaf by leaf across two
    tree tensors; the boolean result keeps the tree structure.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.lt(
        ...     torch.tensor([[1, 2], [3, 4]]),
        ...     torch.tensor([[1, 1], [4, 4]]),
        ... )
        tensor([[False, False],
                [ True, False]])

        >>> ttorch.lt(
        ...     ttorch.tensor({
        ...         'a': [[1, 2], [3, 4]],
        ...         'b': [1.0, 1.5, 2.0],
        ...     }),
        ...     ttorch.tensor({
        ...         'a': [[1, 1], [4, 4]],
        ...         'b': [1.3, 1.2, 2.0],
        ...     }),
        ... )
        <Tensor 0x7ff363bc67f0>
        ├── a --> tensor([[False, False],
        │                 [ True, False]])
        └── b --> tensor([ True, False, False])
    """
    cmp_tree = stream_call(torch.lt, input, other, *args, **kwargs)
    return cmp_tree
# noinspection PyShadowingBuiltins
@doc_from_base()
@func_treelize()
def le(input, other, *args, **kwargs):
    """
    Element-wise less-than-or-equal (``torch.le``) applied leaf by leaf
    across two tree tensors; the boolean result keeps the tree structure.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.le(
        ...     torch.tensor([[1, 2], [3, 4]]),
        ...     torch.tensor([[1, 1], [4, 4]]),
        ... )
        tensor([[ True, False],
                [ True,  True]])

        >>> ttorch.le(
        ...     ttorch.tensor({
        ...         'a': [[1, 2], [3, 4]],
        ...         'b': [1.0, 1.5, 2.0],
        ...     }),
        ...     ttorch.tensor({
        ...         'a': [[1, 1], [4, 4]],
        ...         'b': [1.3, 1.2, 2.0],
        ...     }),
        ... )
        <Tensor 0x7ff363bc6198>
        ├── a --> tensor([[ True, False],
        │                 [ True,  True]])
        └── b --> tensor([ True, False,  True])
    """
    cmp_tree = stream_call(torch.le, input, other, *args, **kwargs)
    return cmp_tree
# noinspection PyShadowingBuiltins
@doc_from_base()
@func_treelize()
def gt(input, other, *args, **kwargs):
    """
    Element-wise greater-than (``torch.gt``) applied leaf by leaf across two
    tree tensors; the boolean result keeps the tree structure.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.gt(
        ...     torch.tensor([[1, 2], [3, 4]]),
        ...     torch.tensor([[1, 1], [4, 4]]),
        ... )
        tensor([[False,  True],
                [False, False]])

        >>> ttorch.gt(
        ...     ttorch.tensor({
        ...         'a': [[1, 2], [3, 4]],
        ...         'b': [1.0, 1.5, 2.0],
        ...     }),
        ...     ttorch.tensor({
        ...         'a': [[1, 1], [4, 4]],
        ...         'b': [1.3, 1.2, 2.0],
        ...     }),
        ... )
        <Tensor 0x7ff363bc6f28>
        ├── a --> tensor([[False,  True],
        │                 [False, False]])
        └── b --> tensor([False,  True, False])
    """
    cmp_tree = stream_call(torch.gt, input, other, *args, **kwargs)
    return cmp_tree
# noinspection PyShadowingBuiltins
@doc_from_base()
@func_treelize()
def ge(input, other, *args, **kwargs):
    """
    Element-wise greater-than-or-equal (``torch.ge``) applied leaf by leaf
    across two tree tensors; the boolean result keeps the tree structure.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.ge(
        ...     torch.tensor([[1, 2], [3, 4]]),
        ...     torch.tensor([[1, 1], [4, 4]]),
        ... )
        tensor([[ True,  True],
                [False,  True]])

        >>> ttorch.ge(
        ...     ttorch.tensor({
        ...         'a': [[1, 2], [3, 4]],
        ...         'b': [1.0, 1.5, 2.0],
        ...     }),
        ...     ttorch.tensor({
        ...         'a': [[1, 1], [4, 4]],
        ...         'b': [1.3, 1.2, 2.0],
        ...     }),
        ... )
        <Tensor 0x7ff363bc6f28>
        ├── a --> tensor([[ True,  True],
        │                 [False,  True]])
        └── b --> tensor([False,  True,  True])
    """
    cmp_tree = stream_call(torch.ge, input, other, *args, **kwargs)
    return cmp_tree
/OTLModel/Classes/LijnvormigElementMarkering.py | from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.Markering import Markering
from OTLMOW.OTLModel.Classes.AOWSType import AOWSType
from OTLMOW.OTLModel.Datatypes.KlLEMarkeringCode import KlLEMarkeringCode
from OTLMOW.OTLModel.Datatypes.KlLEMarkeringSoort import KlLEMarkeringSoort
from OTLMOW.OTLModel.Datatypes.KwantWrdInMeter import KwantWrdInMeter
from OTLMOW.OTLModel.Datatypes.KwantWrdInVierkanteMeter import KwantWrdInVierkanteMeter
from OTLMOW.GeometrieArtefact.LijnGeometrie import LijnGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
# Generated OTL class: behavior must not change; only documentation added.
class LijnvormigElementMarkering(Markering, AOWSType, LijnGeometrie):
    """A marking applied to a line-shaped element to increase its visibility,
    in order to warn, inform or regulate traffic."""
    typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#LijnvormigElementMarkering'
    """De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
    def __init__(self):
        """Initialize all parent classes and declare the OTL attributes."""
        AOWSType.__init__(self)
        Markering.__init__(self)
        LijnGeometrie.__init__(self)
        # (COPRO/BENOR) certification code of the marking.
        self._code = OTLAttribuut(field=KlLEMarkeringCode,
                                  naam='code',
                                  label='code',
                                  objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#LijnvormigElementMarkering.code',
                                  definition='De (COPRO/BENOR) code van de lijnvormig element markering.',
                                  owner=self)
        # Length of the marking, in metres.
        self._lengte = OTLAttribuut(field=KwantWrdInMeter,
                                    naam='lengte',
                                    label='lengte',
                                    objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#LijnvormigElementMarkering.lengte',
                                    definition='De lengte van de markering in meter.',
                                    owner=self)
        # Surface area of the marking, in square metres.
        self._oppervlakte = OTLAttribuut(field=KwantWrdInVierkanteMeter,
                                         naam='oppervlakte',
                                         label='oppervlakte',
                                         objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#LijnvormigElementMarkering.oppervlakte',
                                         definition='De oppervlakte van de markering op het lijnvormig element in vierkante meter.',
                                         owner=self)
        # Kind/description of the line-shaped element marking.
        self._soortOmschrijving = OTLAttribuut(field=KlLEMarkeringSoort,
                                               naam='soortOmschrijving',
                                               label='soort omschrijving',
                                               objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#LijnvormigElementMarkering.soortOmschrijving',
                                               definition='De soort en tevens de omschrijving van de lijnvormige elementen markering.',
                                               owner=self)
    @property
    def code(self):
        """The (COPRO/BENOR) code of the line-shaped element marking."""
        return self._code.get_waarde()
    @code.setter
    def code(self, value):
        self._code.set_waarde(value, owner=self)
    @property
    def lengte(self):
        """The length of the marking in metres."""
        return self._lengte.get_waarde()
    @lengte.setter
    def lengte(self, value):
        self._lengte.set_waarde(value, owner=self)
    @property
    def oppervlakte(self):
        """The surface area of the marking on the line-shaped element, in square metres."""
        return self._oppervlakte.get_waarde()
    @oppervlakte.setter
    def oppervlakte(self, value):
        self._oppervlakte.set_waarde(value, owner=self)
    @property
    def soortOmschrijving(self):
        """The kind and, at the same time, the description of the line-shaped element marking."""
        return self._soortOmschrijving.get_waarde()
    @soortOmschrijving.setter
    def soortOmschrijving(self, value):
        self._soortOmschrijving.set_waarde(value, owner=self)
/CDS-1.0.1.tar.gz/CDS-1.0.1/cds/modules/webhooks/status.py | from __future__ import absolute_import
import json
import sqlalchemy
from copy import deepcopy
from celery import states
from invenio_webhooks.models import Event
def get_deposit_events(deposit_id, _deleted=False):
    """Return the list of Event rows whose payload references ``deposit_id``.

    :param deposit_id: deposit identifier; coerced to ``str`` before matching.
    :param _deleted: when False (default), only events accepted by the
        receiver (``response_code == 202``) are returned.
    """
    # Earlier (Postgres-specific) formulation, kept for reference:
    # return Event.query.filter(
    #     Event.payload.op('->>')(
    #         'deposit_id').cast(String) == self['_deposit']['id']).all()
    deposit_id = str(deposit_id)
    # Optionally exclude "deleted" events (anything not acknowledged with 202).
    filters = []
    if not _deleted:
        filters.append(Event.response_code == 202)
    # Base query: compare the JSON payload's 'deposit_id' member (cast to a
    # SQL string) against the deposit id coerced to the JSON type so the
    # comparison happens consistently on the database side.
    query = Event.query.filter(
        sqlalchemy.cast(
            Event.payload['deposit_id'],
            sqlalchemy.String) == sqlalchemy.type_coerce(
                deposit_id, sqlalchemy.JSON)
    )
    # Apply the optional filters and materialize the result.
    return query.filter(*filters).all()
def iterate_events_results(events, fun):
    """Apply ``fun`` to the result tree of every event that has one.

    :param events: iterable of webhook Event objects.
    :param fun: callable ``fun(task_name, result)`` forwarded to
        :func:`iterate_result`.
    :returns: ``fun`` itself, so stateful callables can be inspected after.
    """
    for evt in events:
        receiver = evt.receiver
        if receiver.has_result(evt):
            iterate_result(raw_info=receiver._raw_info(evt), fun=fun)
    return fun
def get_tasks_status_by_task(events, statuses=None):
    """Collect task statuses from ``events``, grouped by task name.

    :param statuses: optional mapping of previously known statuses, used as
        a fallback when celery no longer has a cached result.
    """
    collector = CollectStatusesByTask(statuses=statuses or {})
    iterate_events_results(events=events, fun=collector)
    return collector.statuses
def iterate_result(raw_info, fun):
    """Walk raw receiver info, applying ``fun`` to every (task_name, result).

    Group results arrive as lists, chain results as tuples, and a single
    result as a one-item mapping; the nesting of ``raw_info`` is mirrored
    in the return value.

    :param raw_info: raw information from a celery receiver.
    :param fun: callable ``fun(task_name, result)`` applied to each leaf,
        e.g. ``lambda task_name, result: result.status``.
    :returns: the same structure as ``raw_info`` with each leaf mapped by ``fun``.
    """
    if isinstance(raw_info, tuple):
        # chain results
        return tuple(iterate_result(part, fun) for part in raw_info)
    if isinstance(raw_info, list):
        # group results
        return [iterate_result(part, fun) for part in raw_info]
    # single result: a one-item mapping {task_name: result}
    task_name, result = next(iter(raw_info.items()))
    return fun(task_name, result)
def _compute_status(statuses):
    """Fold several celery task states into one combined state.

    Priority: all-None -> None; any FAILURE/STARTED/RETRY/PENDING (checked
    in that order) dominates; all-REVOKED -> REVOKED; otherwise SUCCESS.
    """
    if statuses and all(state is None for state in statuses):
        return None
    for dominant in (states.FAILURE, states.STARTED,
                     states.RETRY, states.PENDING):
        if dominant in statuses:
            return dominant
    if statuses and all(state == states.REVOKED for state in statuses):
        return states.REVOKED
    return states.SUCCESS
def merge_tasks_status(task_statuses_1, task_statuses_2):
    """Merge two {task_name: status} mappings, combining statuses per task.

    Missing entries contribute None, which :func:`_compute_status` ignores
    unless both sides are missing.
    """
    every_key = set(task_statuses_1) | set(task_statuses_2)
    return {
        key: _compute_status(
            [task_statuses_1.get(key), task_statuses_2.get(key)])
        for key in every_key
    }
class ComputeGlobalStatus(object):
    """Accumulate per-task celery statuses and fold them into one global status."""
    def __init__(self):
        """Start with nothing collected."""
        self._collected = []
    def __call__(self, task_name, result):
        """Record the status of one task result (the task name is ignored)."""
        self._collected.append(result.status)
    @property
    def status(self):
        """Combined status of everything collected so far."""
        return _compute_status(self._collected)
class ResultEncoder(json.JSONEncoder):
    """JSON encoder for celery task results: exceptions serialize as strings."""
    def default(self, obj):
        """Encode unserializable objects; only exceptions get special handling."""
        if not isinstance(obj, Exception):
            # Defer to the base class, which raises TypeError.
            return json.JSONEncoder.default(self, obj)
        return str(obj)
def collect_info(task_name, result):
    """Summarize one celery result as a plain dict (id, status, info, name)."""
    return dict(
        id=result.id,
        status=result.status,
        info=result.info,
        name=task_name,
    )
class CollectStatusesByTask(object):
    """Collect status information and organize it by task name.

    Intended to be applied over a result tree via ``iterate_result``; the
    ``statuses`` mapping passed at construction acts as a fallback for tasks
    whose celery results are no longer cached.
    """
    def __init__(self, statuses):
        """Start an empty collection, keeping a copy of the known statuses."""
        self._statuses = {}
        # Deep-copied so later mutation of the caller's dict cannot leak in.
        self._original = deepcopy(statuses)
    def __call__(self, task_name, result):
        """Fold one result's status into the collection."""
        old_status = self._statuses.get(task_name, None)
        # get new status from celery only if still exists on celery cache
        # (result.result is None once the backend has expired the entry)
        new_status = result.status \
            if result.result is not None else None
        self._statuses[task_name] = _compute_status([old_status, new_status])
    @property
    def statuses(self):
        """Freshly computed statuses, falling back to the originals.

        Tasks whose collected status is None (expired in celery) keep the
        value supplied at construction time, when one exists.
        """
        # take the calculated (non-None) statuses first
        statuses = {key: value for key, value in self._statuses.items()
                    if value is not None}
        # and add the original value where no new value was computed
        keys = set(self._original) - set(statuses)
        for key in keys:
            statuses[key] = self._original[key]
        return statuses
class CollectInfoTasks(object):
    """Accumulate (task_name, result) pairs in call order; iterable afterwards."""
    def __init__(self):
        """Start with an empty collection."""
        self._collected = []
    def __call__(self, task_name, result):
        """Record one (task_name, result) pair."""
        self._collected.append((task_name, result))
    def __iter__(self):
        """Iterate over the recorded pairs in insertion order."""
        return iter(self._collected)
class GetInfoByID(object):
    """Search celery results for the task with a given id.

    After being applied over a result tree (e.g. via ``iterate_result``),
    ``task_name`` holds the matching task's name (``''`` if none matched)
    and ``result`` the matching result object (``None`` if none matched).
    """
    def __init__(self, task_id):
        """Remember the task id to look for and reset the outputs."""
        self._task_id = task_id
        self.task_name = ''
        # Fix: previously ``result`` was only assigned on a match, so reading
        # it after an unsuccessful search raised AttributeError.
        self.result = None
    def __call__(self, task_name, result):
        """Record the name and result when ``result.id`` matches the target."""
        if result.id == self._task_id:
            self.task_name = task_name
            self.result = result
def replace_task_id(result, old_task_id, new_task_id):
    """Replace ``old_task_id`` with ``new_task_id`` in a serialized result tree.

    The tree is built from lists and tuples; a two-element list/tuple whose
    head equals ``old_task_id`` is rewritten as the pair
    ``(new_task_id, <rewritten tail>)``.  Everything else is recursed into
    (lists/tuples) or returned untouched (leaves: strings, dicts, None, ...).

    Fix over the previous exception-driven version: unpacking ``result`` in a
    ``try`` block treated *any* two-element iterable as a pair, so a
    two-character string (or a two-element set/dict) could be corrupted into
    a (new_task_id, ...) pair.  Pair handling is now restricted to genuine
    lists and tuples.
    """
    if not isinstance(result, (list, tuple)):
        # Leaf node: nothing to rewrite.
        return result
    if len(result) == 2:
        head, tail = result
        if head == old_task_id:
            return new_task_id, replace_task_id(tail, old_task_id, new_task_id)
        return [replace_task_id(head, old_task_id, new_task_id),
                replace_task_id(tail, old_task_id, new_task_id)]
    # Any other arity: rewrite each element, preserving order.
    return [replace_task_id(item, old_task_id, new_task_id) for item in result]
/Flask-Perm-0.2.8.tar.gz/Flask-Perm-0.2.8/flask_perm/services/user_permission.py | from sqlalchemy import func
from sqlalchemy.exc import IntegrityError
from ..core import db
from ..models import UserPermission
def create(user_id, permission_id):
    """Create (or fetch the already-existing) user->permission link.

    The database may reject a duplicate (user_id, permission_id) pair with
    an IntegrityError; in that case the failed insert is rolled back and the
    existing row is returned instead.
    """
    link = UserPermission(
        user_id=user_id,
        permission_id=permission_id,
    )
    db.session.add(link)
    try:
        db.session.commit()
    except IntegrityError:
        # Duplicate pair: discard the insert and return the existing row.
        db.session.rollback()
        link = UserPermission.query.filter_by(
            user_id=user_id,
            permission_id=permission_id,
        ).first()
    return link
def delete(user_permission_id):
    """Delete the user->permission link with the given primary key, if any."""
    record = UserPermission.query.get(user_permission_id)
    if not record:
        return
    db.session.delete(record)
    db.session.commit()
def get(id):
    """Fetch a UserPermission by primary key, or None when absent.

    Note: the parameter name ``id`` shadows the builtin; it is kept for
    call-signature compatibility with existing callers.
    """
    return UserPermission.query.get(id)
def delete_by_user(user_id):
    """Remove every permission link belonging to ``user_id``.

    Rows are deleted one by one through the session (so ORM delete events
    still fire) and committed in a single transaction.
    """
    links = UserPermission.query.filter_by(user_id=user_id).all()
    for link in links:
        db.session.delete(link)
    db.session.commit()
def delete_by_permission(permission_id):
    """Remove every user link granting ``permission_id``.

    Rows are deleted one by one through the session (so ORM delete events
    still fire) and committed in a single transaction.
    """
    links = UserPermission.query.filter_by(permission_id=permission_id).all()
    for link in links:
        db.session.delete(link)
    db.session.commit()
def get_users_by_permission(permission_id):
    """Return the ids of all users granted ``permission_id``."""
    query = UserPermission.query.filter_by(
        permission_id=permission_id
    ).with_entities(UserPermission.user_id)
    return [record.user_id for record in query.all()]
def get_permissions_by_user(user_id):
    """Return the ids of all permissions granted to ``user_id``."""
    query = UserPermission.query.filter_by(
        user_id=user_id
    ).with_entities(UserPermission.permission_id)
    return [record.permission_id for record in query.all()]
def filter_user_permissions(filter_by, offset, limit, sort_field='created_at', sort_dir='desc'):
    """Page through user-permission rows with optional filters and ordering.

    :param filter_by: dict of equality filters (passed to ``filter_by``), or falsy.
    :param offset: number of rows to skip.
    :param limit: maximum number of rows to return.
    :param sort_field: UserPermission column name to sort on.
    :param sort_dir: 'asc' or 'desc' (case-insensitive).
    """
    query = UserPermission.query
    if filter_by:
        query = query.filter_by(**filter_by)
    # Builds e.g. UserPermission.created_at.desc()
    ordering = getattr(getattr(UserPermission, sort_field), sort_dir.lower())()
    return query.order_by(ordering).offset(offset).limit(limit).all()
def count_filter_user_permission(filter_by, offset, limit):
    """Count user-permission rows matching ``filter_by``.

    Note: ``offset`` and ``limit`` are accepted for call-signature symmetry
    with :func:`filter_user_permissions` but are (intentionally) unused —
    the count covers the whole filtered set.
    """
    query = UserPermission.query
    if filter_by:
        query = query.filter_by(**filter_by)
    return query.value(func.count(UserPermission.id))
def rest(user_permission):
    """Serialize a UserPermission row as a plain dict for REST responses."""
    return {
        'id': user_permission.id,
        'user_id': user_permission.user_id,
        'permission_id': user_permission.permission_id,
    }
/FsnViz-0.3.0.tar.gz/FsnViz-0.3.0/fsnviz/fusioncatcher.py | from crimson.fusioncatcher import parse
from .models import CircosEntry, CircosLabel, CircosLink, FusionToolResults
from .utils import adjust_chrom
__all__ = ["FusionCatcherResults"]
class FusionCatcherResults(FusionToolResults):
    """Class representing a FusionCatcher run result."""
    # Chromosome names treated as mitochondrial; fusions touching any of
    # these are dropped from the Circos output.
    mito_names = ("chrM", "M", "MT")
    def __init__(self, results_fname, config, circos_config, tpl_params):
        """Parse ``results_fname`` with crimson's FusionCatcher parser."""
        super().__init__(results_fname, config, circos_config, tpl_params)
        # List of fusion records; each record carries '5end'/'3end' dicts
        # plus the read-count fields used in _make_circos_entry below.
        self.payload = parse(results_fname)
    def _make_circos_entry(self, raw_entry):
        """Convert one parsed fusion record into a CircosEntry.

        Returns None (record skipped) when either breakpoint lies on a
        mitochondrial chromosome.
        """
        left = raw_entry["5end"]
        right = raw_entry["3end"]
        if left["chromosome"] in self.mito_names:
            return
        if right["chromosome"] in self.mito_names:
            return
        lchrom = adjust_chrom(left["chromosome"])
        rchrom = adjust_chrom(right["chromosome"])
        # One-base-wide spans at each breakpoint, linked across chromosomes.
        link = CircosLink(lchrom, left["position"], left["position"] + 1,
                          rchrom, right["position"], right["position"] + 1)
        geneA = CircosLabel(lchrom, left["position"], left["position"] + 1,
                            left["geneSymbol"])
        geneB = CircosLabel(rchrom, right["position"], right["position"] + 1,
                            right["geneSymbol"])
        # NOTE(review): 'njr' is taken from nSpanningUniqueReads and 'nsf'
        # from nSpanningPairs — confirm this mapping against crimson's
        # fusioncatcher output schema.
        njr = raw_entry["nSpanningUniqueReads"]
        nsf = raw_entry["nSpanningPairs"]
        jrA = CircosLabel(lchrom, left["position"], left["position"] + 1,
                          njr)
        jrB = CircosLabel(rchrom, right["position"], right["position"] + 1,
                          njr)
        sfA = CircosLabel(lchrom, left["position"], left["position"] + 1,
                          nsf)
        sfB = CircosLabel(rchrom, right["position"], right["position"] + 1,
                          nsf)
        return CircosEntry(link, [geneA, geneB], [jrA, jrB], [sfA, sfB])
    @property
    def circos_entries(self):
        """Lazily-built, cached list of CircosEntry objects (mito fusions skipped)."""
        if not hasattr(self, "_circos_entries"):
            entries = [self._make_circos_entry(x) for x in self.payload]
            # filter(None, ...) drops the entries skipped as mitochondrial.
            self._circos_entries = list(filter(None, entries))
        return self._circos_entries
/Django-4.2.4.tar.gz/Django-4.2.4/django/db/backends/oracle/utils.py | import datetime
from .base import Database
class InsertVar:
    """
    A late-binding cursor variable that can be passed to Cursor.execute
    as a parameter, in order to receive the id of the row created by an
    insert statement.
    """
    # Maps Django internal field types to the cx_Oracle output type used for
    # the RETURNING ... INTO bind variable; anything unlisted binds as str.
    types = {
        "AutoField": int,
        "BigAutoField": int,
        "SmallAutoField": int,
        "IntegerField": int,
        "BigIntegerField": int,
        "SmallIntegerField": int,
        "PositiveBigIntegerField": int,
        "PositiveSmallIntegerField": int,
        "PositiveIntegerField": int,
        "FloatField": Database.NATIVE_FLOAT,
        "DateTimeField": Database.TIMESTAMP,
        "DateField": Database.Date,
        "DecimalField": Database.NUMBER,
    }
    def __init__(self, field):
        """Pick the bind type from the (target) field's internal type."""
        # For relation fields, follow target_field to the referenced
        # column's type; plain fields are used directly.
        internal_type = getattr(field, "target_field", field).get_internal_type()
        self.db_type = self.types.get(internal_type, str)
        self.bound_param = None
    def bind_parameter(self, cursor):
        """Create and remember a cx_Oracle variable on the given cursor."""
        self.bound_param = cursor.cursor.var(self.db_type)
        return self.bound_param
    def get_value(self):
        """Return the value the database wrote into the bound variable."""
        return self.bound_param.getvalue()
class Oracle_datetime(datetime.datetime):
    """
    A datetime object, with an additional class attribute
    to tell cx_Oracle to save the microseconds too.
    """
    # cx_Oracle input size marker: bind as TIMESTAMP so microseconds survive.
    input_size = Database.TIMESTAMP
    @classmethod
    def from_datetime(cls, dt):
        """Build an instance from a plain datetime, keeping microseconds.

        Fix: construct via ``cls`` (not the hard-coded class name) so
        subclasses get instances of their own type from this alternate
        constructor.  Any tzinfo on ``dt`` is not propagated.
        """
        return cls(
            dt.year,
            dt.month,
            dt.day,
            dt.hour,
            dt.minute,
            dt.second,
            dt.microsecond,
        )
class BulkInsertMapper:
    """SQL templates that wrap bulk-insert placeholders in the Oracle
    conversion function appropriate for each Django field type."""
    BLOB = "TO_BLOB(%s)"
    DATE = "TO_DATE(%s)"
    INTERVAL = "CAST(%s as INTERVAL DAY(9) TO SECOND(6))"
    NCLOB = "TO_NCLOB(%s)"
    NUMBER = "TO_NUMBER(%s)"
    TIMESTAMP = "TO_TIMESTAMP(%s)"
    # Django internal field type -> placeholder template; fields not listed
    # here use the plain %s placeholder.
    types = {
        "AutoField": NUMBER,
        "BigAutoField": NUMBER,
        "BigIntegerField": NUMBER,
        "BinaryField": BLOB,
        "BooleanField": NUMBER,
        "DateField": DATE,
        "DateTimeField": TIMESTAMP,
        "DecimalField": NUMBER,
        "DurationField": INTERVAL,
        "FloatField": NUMBER,
        "IntegerField": NUMBER,
        "PositiveBigIntegerField": NUMBER,
        "PositiveIntegerField": NUMBER,
        "PositiveSmallIntegerField": NUMBER,
        "SmallAutoField": NUMBER,
        "SmallIntegerField": NUMBER,
        "TextField": NCLOB,
        "TimeField": TIMESTAMP,
    }
def dsn(settings_dict):
    """Build the Oracle DSN from Django DB settings.

    With a PORT, construct a full host/port/service DSN via makedsn
    (empty HOST falls back to localhost); otherwise NAME is assumed to be
    a complete DSN/TNS alias and returned as-is.
    """
    port = settings_dict["PORT"]
    if not port:
        return settings_dict["NAME"]
    host = settings_dict["HOST"].strip() or "localhost"
    return Database.makedsn(host, int(port), settings_dict["NAME"])
/AxlNLP-0.0.1.tar.gz/AxlNLP-0.0.1/axlnlp/dataset/featureset.py | import numpy as np
from abc import ABC, abstractmethod
import os
# axl nlp
import axlnlp.utils as u
from axlnlp import get_logger
logger = get_logger(__name__)
class FeatureSet:
    """Bundle of feature models over a dataset, with a pickle cache.

    On construction: instantiate the feature models, fit any untrained
    trainable ones, then either load previously extracted feature matrices
    from ``dump_path`` or extract and dump them.  Models are deactivated
    (dropped) after processing; call :meth:`activate` before :meth:`extract`.
    """
    def __init__(self, dataset, features:list, dump_path:str="/tmp/TEST_FEATURE.pkl"):
        # features: list of (FeatureModelClass, [params-dict-or-args]) entries.
        self.dataset = dataset
        self.dump_path = dump_path
        self.__fm_params = features # if isinstance(features, dict) else dict(features)
        self.__init_fms()
        # self.train is set by __init_fms: True if any trainable model is untrained.
        if self.train:
            self.__fit()
        # Reuse cached matrices when a dump already exists; otherwise extract.
        if os.path.exists(dump_path):
            self.load()
        else:
            self.__process_dataset()
        self.active = False
    def save(self):
        """Pickle the feature matrices, model init params and total dim."""
        logger.info(f"Dumping matrix and init params to {self.dump_path}")
        u.pickle_data([self.feature_matrixes, self.__fm_params, self.feature_dim], self.dump_path)
    def load(self):
        """Restore matrices, init params and total dim from the pickle dump."""
        logger.info(f"Loading matrix and init params from {self.dump_path}")
        self.feature_matrixes, self.__fm_params, self.feature_dim = u.load_pickle_data(self.dump_path)
    def __init_fms(self):
        """Instantiate each feature model from (class, params) entries.

        Also derives ``self.train`` (True if any trainable model reports
        itself untrained) and ``self.feature_names``.
        """
        self.feature_models = []
        are_trained = []
        for fm, *params in self.__fm_params:
            logger.info(f"Init {fm} ...")
            if not params:
                m = fm(self.dataset)
            elif isinstance(params[0], dict):
                m = fm(self.dataset,**params[0])
            # NOTE(review): ``params`` is always a list here (from *params),
            # so this branch always wins over the RuntimeError below and
            # expects params[0] to be an iterable of positional args —
            # confirm whether ``isinstance(params[0], (list, tuple))`` was meant.
            elif isinstance(params, list):
                m = fm(self.dataset,*params[0])
            else:
                raise RuntimeError("Feature Model init failed ...")
            if m.trainable:
                are_trained.append(m.trained)
            self.feature_models.append(m)
        # True when at least one trainable model still needs fitting.
        self.train = False in are_trained
        self.feature_names = [m.name for m in self.feature_models]
    def __fit(self):
        """Fit every trainable feature model."""
        for fm in self.feature_models:
            if fm.trainable:
                fm.fit()
    def __process_dataset(self):
        """Extract feature matrices for all models and dump them to disk.

        Trainable models are extracted per dataset split (one padded matrix
        per split id, indexed by sample id); non-trainable models extract a
        single matrix.  Models are deactivated afterwards.
        """
        feature_matrixes = {}
        feature_dim = 0
        for fm in self.feature_models:
            feature_dim += fm.feature_dim
            if fm.trainable:
                split_matrixes = {}
                for i, split_set in self.dataset.splits.items():
                    # Matrix is sized by the max sample id so rows can be
                    # addressed directly by sample id.
                    size = max(self.dataset.level_dfs[self.dataset.sample_level]["id"].to_numpy()) + 1
                    matrix = np.zeros((size, self.dataset.max_sample_length, fm.feature_dim))
                    for split_type, split_ids in split_set.items():
                        feature_matrix = fm.extract(
                                                    sample_ids=split_ids,
                                                    split_id=i
                                                    )
                        matrix[split_ids] = feature_matrix
                    split_matrixes[i] = matrix
                feature_matrixes[fm.name] = split_matrixes
            else:
                feature_matrixes[fm.name] = fm.extract()
        self.feature_dim = feature_dim
        self.feature_matrixes = feature_matrixes
        self.save()
        self.deactivate()
    def deactivate(self):
        """Drop the in-memory feature models (matrices stay available)."""
        self.feature_models = []
    def activate(self):
        """Re-instantiate the feature models and load trained state."""
        self.__init_fms()
        for fm in self.feature_models:
            if fm.trainable:
                fm.load()
        self.active = True
    @u.timer
    def get(self,split_id, sample_ids=None):
        """Concatenate all feature matrices for ``split_id`` along the
        feature axis (axis 2) and return the combined matrix.

        NOTE(review): ``sample_ids`` is currently unused (see the
        commented-out subsetting below).
        """
        matrixes = []
        for feature in self.feature_names:
            feature_matrix = self.feature_matrixes[feature]
            # Trainable models store one matrix per split id.
            if isinstance(feature_matrix, dict):
                feature_matrix = feature_matrix[split_id]
            #print(feature, feature_matrix.shape)
            matrixes.append(feature_matrix)
        matrix = np.concatenate(matrixes, axis=2)
        #print("CONCAT", matrix.shape)
        #if sample_ids is not None:
            #matrix = matrix[self.sample_id2position[sample_ids]]
        return matrix
    def extract(self):
        """Extract features for new samples at inference time (not implemented)."""
        if not self.active:
            raise RuntimeError("Feature Models are not active. Call .activate() on FeatureSet to load feature models.")
        # check if id is in dataset?
        # if not run add sample
        # then get it?
        raise NotImplementedError()
/NlvWxPython-4.2.0-cp37-cp37m-win_amd64.whl/wx/lib/masked/textctrl.py | import wx
from wx.lib.masked import *
# jmg 12/9/03 - when we cut ties with Py 2.2 and earlier, this would
# be a good place to implement the 2.3 logger class
from wx.tools.dbg import Logger
##dbg = Logger()
##dbg(enable=1)
class BaseMaskedTextCtrl( wx.TextCtrl, MaskedEditMixin ):
"""
This is the primary derivation from MaskedEditMixin. It provides
a general masked text control that can be configured with different
masks.
However, this is done with an extra level of inheritance, so that
"general" classes like masked.TextCtrl can have all possible attributes,
while derived classes, like masked.TimeCtrl and masked.NumCtrl
can prevent exposure of those optional attributes of their base
class that do not make sense for their derivation. Therefore,
we define::
BaseMaskedTextCtrl(TextCtrl, MaskedEditMixin)
and::
masked.TextCtrl(BaseMaskedTextCtrl, MaskedEditAccessorsMixin).
This allows us to then derive::
masked.NumCtrl( BaseMaskedTextCtrl )
and not have to expose all the same accessor functions for the
derived control when they don't all make sense for it.
In practice, BaseMaskedTextCtrl should never be instantiated directly,
but should only be used in derived classes.
"""
    def __init__( self, parent, id=-1, value = '',
                  pos = wx.DefaultPosition,
                  size = wx.DefaultSize,
                  style = wx.TE_PROCESS_TAB,
                  validator=wx.DefaultValidator,
                  name = 'maskedTextCtrl',
                  setupEventHandling = True,
                  **kwargs):
        """
        Default class constructor.

        :param wx.Window `parent`: the window parent. Must not be ``None``;
        :param integer `id`: window identifier. A value of -1 indicates a default value;
        :param string `value`: value to be shown;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :type `pos`: tuple or :class:`wx.Point`
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param integer `style`: the window style;
        :param wx.Validator `validator`: this is mainly provided for data-transfer, as control does
         its own validation;
        :param string `name`: the window name;
        :param boolean `setupEventHandling`: setup event handling by default.

        Remaining ``**kwargs`` are forwarded to :meth:`_PostInit` (mask
        options consumed by :class:`MaskedEditMixin`).
        """
        # NOTE(review): 'this' is presumably present when the underlying C++
        # widget was already created (two-phase creation) — skip creating it
        # again in that case; confirm against the wxPython wrapper internals.
        if not hasattr(self, 'this'):
            # Create the native control empty; the initial value is applied
            # later by _PostInit through the mask machinery.
            wx.TextCtrl.__init__(self, parent, id, value='',
                                 pos=pos, size = size,
                                 style=style, validator=validator,
                                 name=name)
        self._PostInit(setupEventHandling = setupEventHandling,
                       name=name, value=value,**kwargs )
    def _PostInit(self,setupEventHandling=True,
                  name='maskedTextCtrl' , value='', **kwargs):
        """
        Second-phase initialization: wires the masked-edit mixin into the
        native control, applies the initial value through the mask logic,
        and (optionally) binds the event handlers the mixin relies on.
        """
        self.controlInitialized = True
        # Initialize the mixin machinery (mask, fields, formatting options).
        MaskedEditMixin.__init__( self, name, **kwargs )
        # Apply the initial value via the mask, not the raw wx.TextCtrl.
        self._SetInitialValue(value)
        if setupEventHandling:
            ## Setup event handlers
            self.Bind(wx.EVT_SET_FOCUS, self._OnFocus )         ## defeat automatic full selection
            self.Bind(wx.EVT_KILL_FOCUS, self._OnKillFocus )    ## run internal validator
            self.Bind(wx.EVT_LEFT_DCLICK, self._OnDoubleClick)  ## select field under cursor on dclick
            self.Bind(wx.EVT_RIGHT_UP, self._OnContextMenu )    ## bring up an appropriate context menu
            self.Bind(wx.EVT_KEY_DOWN, self._OnKeyDown )        ## capture control events not normally seen, eg ctrl-tab.
            self.Bind(wx.EVT_CHAR, self._OnChar )               ## handle each keypress
            self.Bind(wx.EVT_TEXT, self._OnTextChange )         ## color control appropriately & keep
                                                                ## track of previous value for undo
def __repr__(self):
return "<BaseMaskedTextCtrl: %s>" % self.GetValue()
    def _GetSelection(self):
        """
        Allow mixin to get the text selection of this control.
        REQUIRED by any class derived from MaskedEditMixin.

        :returns: the ``(start, end)`` pair from :meth:`wx.TextCtrl.GetSelection`.
        """
        return self.GetSelection()
    def _SetSelection(self, sel_start, sel_to):
        """
        Allow mixin to set the text selection of this control.
        REQUIRED by any class derived from MaskedEditMixin.
        """
        #### dbg("MaskedTextCtrl::_SetSelection(%(sel_start)d, %(sel_to)d)" % locals())
        # Guard: a destroyed wx widget tests False; skip in that case.
        if self:
            return self.SetSelection( sel_start, sel_to )
## def SetSelection(self, sel_start, sel_to):
## """
## This is just for debugging...
## """
## dbg("MaskedTextCtrl::SetSelection(%(sel_start)d, %(sel_to)d)" % locals())
## wx.TextCtrl.SetSelection(self, sel_start, sel_to)
    def _GetInsertionPoint(self):
        """Allow mixin to read the caret position (required by MaskedEditMixin)."""
        return self.GetInsertionPoint()
    def _SetInsertionPoint(self, pos):
        """Allow mixin to move the caret (required by MaskedEditMixin)."""
        #### dbg("MaskedTextCtrl::_SetInsertionPoint(%(pos)d)" % locals())
        # Guard: a destroyed wx widget tests False; skip in that case.
        if self:
            self.SetInsertionPoint(pos)
## def SetInsertionPoint(self, pos):
## """
## This is just for debugging...
## """
## dbg("MaskedTextCtrl::SetInsertionPoint(%(pos)d)" % locals())
## wx.TextCtrl.SetInsertionPoint(self, pos)
    def IsEmpty(*args, **kw):
        """
        Delegate to :meth:`MaskedEditMixin.IsEmpty` ("empty" is judged against
        the mask template), shadowing any base-class implementation.
        """
        return MaskedEditMixin.IsEmpty(*args, **kw)
    def _GetValue(self):
        """
        Allow mixin to get the raw value of the control with this function.
        REQUIRED by any class derived from MaskedEditMixin.
        """
        return self.GetValue()
    def _SetValue(self, value):
        """
        Allow mixin to set the raw value of the control with this function.
        REQUIRED by any class derived from MaskedEditMixin.

        Fires EVT_TEXT (uses wx.TextCtrl.SetValue, not ChangeValue).
        """
##        dbg('MaskedTextCtrl::_SetValue("%(value)s", use_change_value=%(use_change_value)d)' % locals(), indent=1)
        # Record current selection and insertion point, for undo
        self._prevSelection = self._GetSelection()
        self._prevInsertionPoint = self._GetInsertionPoint()
        wx.TextCtrl.SetValue(self, value)
##        dbg(indent=0)
    def _ChangeValue(self, value):
        """
        Allow mixin to set the raw value of the control with this function without
        generating an event as a result. (New for masked.TextCtrl as of 2.8.4)
        """
##        dbg('MaskedTextCtrl::_ChangeValue("%(value)s", use_change_value=%(use_change_value)d)' % locals(), indent=1)
        # Record current selection and insertion point, for undo
        self._prevSelection = self._GetSelection()
        self._prevInsertionPoint = self._GetInsertionPoint()
        wx.TextCtrl.ChangeValue(self, value)
##        dbg(indent=0)
def SetValue(self, value):
    """
    Externally accessible replacement for :meth:`TextCtrl.SetValue`.

    Performs a smart "paste" of ``value`` so that the masked control is
    not corrupted (an EVT_TEXT event is fired, as with the base control).

    .. note::
        This must be done in the class derived from the base wx control.
    """
    # use_change_value defaults to False, i.e. SetValue semantics
    self.ModifyValue(value)
def ChangeValue(self, value):
    """
    Provided to accommodate similar functionality added to base
    control in wxPython 2.7.1.1.

    :param string `value`: new value for control; this will not fire an event
    """
    self.ModifyValue(value, use_change_value=True)
def ModifyValue(self, value, use_change_value=False):
    """
    This factored function of common code does the bulk of the work for
    SetValue and ChangeValue.

    :param string `value`: new value for control
    :param boolean `use_change_value`: if True uses :meth:`~lib.masked.textctrl.TextCtrl.ChangeValue`
        (i.e. no EVT_TEXT event is generated)
    """
##    dbg('MaskedTextCtrl::ModifyValue("%(value)s", use_change_value=%(use_change_value)d)' % locals(), indent=1)
    if not self._mask:
        if use_change_value:
            wx.TextCtrl.ChangeValue(self, value)    # revert to base control behavior
        else:
            wx.TextCtrl.SetValue(self, value)       # revert to base control behavior
        return
    # empty previous contents, replacing entire value:
    self._SetInsertionPoint(0)
    self._SetSelection(0, self._masklength)
    # for signed numeric controls using parentheses, convert a leading '-'
    # into the '(...)' negative representation:
    if self._signOk and self._useParens:
        signpos = value.find('-')
        if signpos != -1:
            value = value[:signpos] + '(' + value[signpos+1:].strip() + ')'
        elif value.find(')') == -1 and len(value) < self._masklength:
            value += ' '    # add place holder for reserved space for right paren
    if( len(value) < self._masklength               # value shorter than control
        and (self._isFloat or self._isInt)          # and it's a numeric control
        and self._ctrl_constraints._alignRight ):   # and it's a right-aligned control
        # try to intelligently "pad out" the value to the right size:
        value = self._template[0:self._masklength - len(value)] + value
        if self._isFloat and value.find('.') == -1:
            value = value[1:]
    # make Set/ChangeValue behave the same as if you had typed the value in:
    try:
        value, replace_to = self._Paste(value, raise_on_invalid=True, just_return_value=True)
        if self._isFloat:
            self._isNeg = False     # (clear current assumptions)
            value = self._adjustFloat(value)
        elif self._isInt:
            self._isNeg = False     # (clear current assumptions)
            value = self._adjustInt(value)
        elif self._isDate and not self.IsValid(value) and self._4digityear:
            value = self._adjustDate(value, fixcentury=True)
    except ValueError:
        # If date, year might be 2 digits vs. 4; try adjusting it:
        if self._isDate and self._4digityear:
            dateparts = value.split(' ')
            dateparts[0] = self._adjustDate(dateparts[0], fixcentury=True)
            value = ' '.join(dateparts)
##            dbg('adjusted value: "%s"' % value)
            value, replace_to = self._Paste(value, raise_on_invalid=True, just_return_value=True)
        else:
##            dbg('exception thrown', indent=0)
            raise
    if use_change_value:
        self._ChangeValue(value)
    else:
        self._SetValue(value)   # note: to preserve similar capability, .SetValue()
                                # does not change IsModified()
####    dbg('queuing insertion after ._Set/ChangeValue', replace_to)
    # set selection to last char replaced by paste
    wx.CallAfter(self._SetInsertionPoint, replace_to)
    wx.CallAfter(self._SetSelection, replace_to, replace_to)
##    dbg(indent=0)
def SetFont(self, *args, **kwargs):
    """
    Set the font, then recalculate control size, if appropriate.

    see :meth:`TextCtrl.SetFont` for valid arguments
    """
    wx.TextCtrl.SetFont(self, *args, **kwargs)
    if self._autofit:
        # resize the control so the (possibly new) font fits the template:
##        dbg('calculated size:', self._CalcSize())
        self.SetClientSize(self._CalcSize())
        width = self.GetSize().width
        height = self.GetBestSize().height
##        dbg('setting client size to:', (width, height))
        self.SetInitialSize((width, height))
def Clear(self):
    """
    Blanks the current control value by replacing it with the default
    value (the template).
    """
    if not self._mask:
        # No mask configured: plain base-control clear.
        wx.TextCtrl.Clear(self)
        return
    self.ClearValue()
def _Refresh(self):
    """
    Allow mixin to refresh the base control with this function.
    REQUIRED by any class derived from MaskedEditMixin.
    """
##    dbg('MaskedTextCtrl::_Refresh', indent=1)
    wx.TextCtrl.Refresh(self)
##    dbg(indent=0)
def Refresh(self):
    """
    This function redefines the externally accessible :meth:`TextCtrl.Refresh`
    to validate the contents of the masked control as it refreshes.

    .. note::
        This must be done in the class derived from the base wx control.
    """
##    dbg('MaskedTextCtrl::Refresh', indent=1)
    # re-validate (may recolor the control), then repaint via the base class:
    self._CheckValid()
    self._Refresh()
##    dbg(indent=0)
def _IsEditable(self):
    """
    Allow mixin to determine if the base control is editable with this function.
    REQUIRED by any class derived from MaskedEditMixin.
    """
    return wx.TextCtrl.IsEditable(self)
def Cut(self):
    """
    Externally accessible replacement for :meth:`TextCtrl.Cut`; performs a
    smart "erase" of the selection so the masked control is not corrupted.

    .. note::
        This must be done in the class derived from the base wx control.
    """
    if not self._mask:
        wx.TextCtrl.Cut(self)   # no mask: plain base-control behavior
        return
    self._Cut()                 # call the mixin's Cut method
def Paste(self):
    """
    This function redefines the externally accessible :meth:`TextCtrl.Paste`
    to be a smart "paste" of the text in question, so as not to corrupt the
    masked control.

    .. note::
        This must be done in the class derived from the base wx control.
    """
    if self._mask:
        self._Paste()               # call the mixin's Paste method
    else:
        # Bug fix: the original called wx.TextCtrl.Paste(self, value), but
        # 'value' is undefined in this scope (Paste takes no data argument),
        # which raised NameError whenever an unmasked control pasted.
        wx.TextCtrl.Paste(self)     # else revert to base control behavior
def Undo(self):
    """
    This function defines the undo operation for the control.
    (The default undo is 1-deep.)
    """
    if not self._mask:
        wx.TextCtrl.Undo(self)  # no mask: plain base-control behavior
        return
    self._Undo()                # mixin's 1-deep undo
def IsModified(self):
    """
    This function overrides the raw :meth:`TextCtrl.IsModified` method,
    because the masked edit mixin uses SetValue to change the value, which
    doesn't modify the state of this attribute.  So, the derived control
    keeps track on each keystroke to see if the value changes, and if so,
    it's been modified.
    """
    # modified if either the base control or the mixin's keystroke
    # tracking flag reports a change:
    return wx.TextCtrl.IsModified(self) or self.modified
def _CalcSize(self, size=None):
    """
    Calculate automatic size if allowed; use base mixin function.
    """
    return self._calcSize(size)
class TextCtrl( BaseMaskedTextCtrl, MaskedEditAccessorsMixin ):
    """
    The "user-visible" masked text control; it is identical to the
    BaseMaskedTextCtrl class it's derived from.
    (This extra level of inheritance allows us to add the generic
    set of masked edit parameters only to this class while allowing
    other classes to derive from the "base" masked text control,
    and provide a smaller set of valid accessor functions.)
    See BaseMaskedTextCtrl for available methods.
    """
    pass
class PreMaskedTextCtrl( BaseMaskedTextCtrl, MaskedEditAccessorsMixin ):
    """
    This class exists to support the use of XRC subclassing.
    """
    # This should really be wx.EVT_WINDOW_CREATE but it is not
    # currently delivered for native controls on all platforms, so
    # we'll use EVT_SIZE instead. It should happen shortly after the
    # control is created as the control is set to its "best" size.
    _firstEventType = wx.EVT_SIZE

    def __init__(self):
        # Two-phase create: construct the bare control now; the real
        # masked-edit initialization runs in OnCreate after XRC has
        # created the native window.
        wx.TextCtrl.__init__(self)
        self.Bind(self._firstEventType, self.OnCreate)

    def OnCreate(self, evt):
        # One-shot handler: unhook ourselves, then finish mixin setup.
        self.Unbind(self._firstEventType)
        self._PostInit()
__i=0
## CHANGELOG:
## ====================
## Version 1.3
## - Added support for ChangeValue() function, similar to that of the base
## control, added in wxPython 2.7.1.1.
##
## Version 1.2
## - Converted docstrings to reST format, added doc for ePyDoc.
## removed debugging override functions.
##
## Version 1.1
## 1. Added .SetFont() method that properly resizes control
## 2. Modified control to support construction via XRC mechanism.
// FreePyBX-1.0-RC1/freepybx/public/js/dojo/parser.js
define("dojo/parser",["./_base/kernel","./_base/lang","./_base/array","./_base/html","./_base/window","./_base/url","./_base/json","./aspect","./date/stamp","./query","./on","./ready"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,_a,_b){
new Date("X");
var _c={"dom-attributes-explicit":document.createElement("div").attributes.length<40};
// Feature-test lookup ("has"-style): return the cached result for feature
// name _e from the _c table defined above.
function _d(_e){
return _c[_e];
};
_1.parser=new function(){
var _f={};
// Build a map from lowercased member name -> original-case member name for
// every non-underscore-prefixed member of _11.  Used to match HTML attribute
// names (case-insensitive) to widget prototype property names.
function _10(_11){
var map={};
for(var _12 in _11){
if(_12.charAt(0)=="_"){
continue;
}
map[_12.toLowerCase()]=_12;
}
return map;
};
_8.after(_2,"extend",function(){
_f={};
},true);
var _13={};
// Convert a declarative <script type="dojo/..."> node (_14) into a Function.
// Honors the "args" attribute (named arguments mapped onto arguments[i]) and
// the "with" attribute (scope objects wrapped around the body).  _15 is the
// "data-dojo-" style attribute prefix.
this._functionFromScript=function(_14,_15){
var _16="";
var _17="";
var _18=(_14.getAttribute(_15+"args")||_14.getAttribute("args"));
if(_18){
_3.forEach(_18.split(/\s*,\s*/),function(_19,idx){
_16+="var "+_19+" = arguments["+idx+"]; ";
});
}
var _1a=_14.getAttribute("with");
if(_1a&&_1a.length){
_3.forEach(_1a.split(/\s*,\s*/),function(_1b){
_16+="with("+_1b+"){";
_17+="}";
});
}
// NOTE: markup text is compiled as code via the Function constructor; this
// is by design for declarative scripts (markup is author-controlled).
return new Function(_16+_14.innerHTML+_17);
};
// instantiate(): given a list of nodes (or {node,type,scripts,inherited}
// descriptors produced by parse()), construct the corresponding class
// instances.  _1c: node list; _1d: property overrides mixed into every
// instance; _1e: options {scope, propsThis, defaults, noStart, template}.
// Returns the array of created instances.
this.instantiate=function(_1c,_1d,_1e){
var _1f=[],_1d=_1d||{};
_1e=_1e||{};
// Attribute names honoring the configured scope, e.g. "data-dojo-type",
// "data-dojo-props", "data-dojo-id", plus the legacy dojoType variants.
var _20=(_1e.scope||_1._scopeName)+"Type",_21="data-"+(_1e.scope||_1._scopeName)+"-",_22=_21+"type",_23=_21+"props",_24=_21+"attach-point",_25=_21+"attach-event",_26=_21+"id";
var _27={};
_3.forEach([_23,_22,_20,_26,"jsId",_24,_25,"dojoAttachPoint","dojoAttachEvent","class","style"],function(_28){
_27[_28.toLowerCase()]=_28.replace(_1e.scope,"dojo");
});
_3.forEach(_1c,function(obj){
if(!obj){
return;
}
// Resolve the target node, its declared type, and the class object
// (classes are cached in _13 by fully-qualified type name).
var _29=obj.node||obj,_2a=_20 in _1d?_1d[_20]:obj.node?obj.type:(_29.getAttribute(_22)||_29.getAttribute(_20)),_2b=_13[_2a]||(_13[_2a]=_2.getObject(_2a)),_2c=_2b&&_2b.prototype;
if(!_2b){
throw new Error("Could not load class '"+_2a);
}
var _2d={};
if(_1e.defaults){
_2.mixin(_2d,_1e.defaults);
}
if(obj.inherited){
_2.mixin(_2d,obj.inherited);
}
var _2e;
if(_d("dom-attributes-explicit")){
// Browser reports only attributes actually present in the markup.
_2e=_29.attributes;
}else{
// Old IE reports every possible attribute; reconstruct the list of
// user-specified attributes by scraping the node's outerHTML.
var _2f=/^input$|^img$/i.test(_29.nodeName)?_29:_29.cloneNode(false),_30=_2f.outerHTML.replace(/=[^\s"']+|="[^"]*"|='[^']*'/g,"").replace(/^\s*<[a-zA-Z0-9]*/,"").replace(/>.*$/,"");
_2e=_3.map(_30.split(/\s+/),function(_31){
var _32=_31.toLowerCase();
return {name:_31,value:(_29.nodeName=="LI"&&_31=="value")||_32=="enctype"?_29.getAttribute(_32):_29.getAttributeNode(_32).value,specified:true};
});
}
// Walk the attributes, converting each to a widget parameter typed
// according to the class prototype's matching property.
var i=0,_33;
while(_33=_2e[i++]){
if(!_33||!_33.specified){
continue;
}
var _34=_33.name,_35=_34.toLowerCase(),_36=_33.value;
if(_35 in _27){
switch(_27[_35]){
case "data-dojo-props":
var _37=_36;
break;
case "data-dojo-id":
case "jsId":
var _38=_36;
break;
case "data-dojo-attach-point":
case "dojoAttachPoint":
_2d.dojoAttachPoint=_36;
break;
case "data-dojo-attach-event":
case "dojoAttachEvent":
_2d.dojoAttachEvent=_36;
break;
case "class":
_2d["class"]=_29.className;
break;
case "style":
_2d["style"]=_29.style&&_29.style.cssText;
break;
}
}else{
if(!(_34 in _2c)){
// Case-insensitive match of attribute to prototype property.
var map=(_f[_2a]||(_f[_2a]=_10(_2c)));
_34=map[_35]||_34;
}
if(_34 in _2c){
// Coerce the string attribute value to the property's declared type.
switch(typeof _2c[_34]){
case "string":
_2d[_34]=_36;
break;
case "number":
_2d[_34]=_36.length?Number(_36):NaN;
break;
case "boolean":
_2d[_34]=_36.toLowerCase()!="false";
break;
case "function":
if(_36===""||_36.search(/[^\w\.]+/i)!=-1){
_2d[_34]=new Function(_36);
}else{
_2d[_34]=_2.getObject(_36,false)||new Function(_36);
}
break;
default:
// Arrays, Dates, URLs and plain objects get special parsing.
var _39=_2c[_34];
_2d[_34]=(_39&&"length" in _39)?(_36?_36.split(/\s*,\s*/):[]):(_39 instanceof Date)?(_36==""?new Date(""):_36=="now"?new Date():_9.fromISOString(_36)):(_39 instanceof _1._Url)?(_1.baseUrl+_36):_7.fromJson(_36);
}
}else{
_2d[_34]=_36;
}
}
}
// data-dojo-props is parsed as an object literal and mixed in last.
if(_37){
try{
_37=_7.fromJson.call(_1e.propsThis,"{"+_37+"}");
_2.mixin(_2d,_37);
}
catch(e){
throw new Error(e.toString()+" in data-dojo-props='"+_37+"'");
}
}
_2.mixin(_2d,_1d);
// Collect declarative <script type="dojo/..."> children, unless the
// class opts out via _noScript.
var _3a=obj.node?obj.scripts:(_2b&&(_2b._noScript||_2c._noScript)?[]:_a("> script[type^='dojo/']",_29));
var _3b=[],_3c=[],_3d=[],on=[];
if(_3a){
for(i=0;i<_3a.length;i++){
var _3e=_3a[i];
_29.removeChild(_3e);
var _3f=(_3e.getAttribute(_21+"event")||_3e.getAttribute("event")),_40=_3e.getAttribute(_21+"prop"),_2a=_3e.getAttribute("type"),nf=this._functionFromScript(_3e,_21);
if(_3f){
if(_2a=="dojo/connect"){
_3b.push({event:_3f,func:nf});
}else{
if(_2a=="dojo/on"){
on.push({event:_3f,func:nf});
}else{
_2d[_3f]=nf;
}
}
}else{
if(_2a=="dojo/watch"){
_3d.push({prop:_40,func:nf});
}else{
_3c.push(nf);
}
}
}
}
// Construct the instance; a markupFactory hook on the class wins over
// a direct constructor call.
var _41=_2b.markupFactory||_2c.markupFactory;
var _42=_41?_41(_2d,_29,_2b):new _2b(_2d,_29);
_1f.push(_42);
if(_38){
_2.setObject(_38,_42);
}
// Wire up the declarative connects / ons / watches collected above.
for(i=0;i<_3b.length;i++){
_8.after(_42,_3b[i].event,_1.hitch(_42,_3b[i].func),true);
}
for(i=0;i<_3c.length;i++){
_3c[i].call(_42);
}
for(i=0;i<_3d.length;i++){
_42.watch(_3d[i].prop,_3d[i].func);
}
for(i=0;i<on.length;i++){
_b(_42,on[i].event,on[i].func);
}
},this);
// Unless suppressed, start up every newly created widget.
if(!_1d._started){
_3.forEach(_1f,function(_43){
if(!_1e.noStart&&_43&&_2.isFunction(_43.startup)&&!_43._started){
_43.startup();
}
});
}
return _1f;
};
// parse(): scan the DOM starting at rootNode (default: the document body)
// for nodes carrying a data-dojo-type / dojoType attribute, then hand the
// collected descriptors to instantiate().  Accepts (rootNode, options) or a
// single options object with a .rootNode member.
this.parse=function(_44,_45){
var _46;
if(!_45&&_44&&_44.rootNode){
_45=_44;
_46=_45.rootNode;
}else{
_46=_44;
}
_46=_46?_4.byId(_46):_5.body();
_45=_45||{};
var _47=(_45.scope||_1._scopeName)+"Type",_48="data-"+(_45.scope||_1._scopeName)+"-",_49=_48+"type",_4a=_48+"textdir";
var _4b=[];
var _4c=_46.firstChild;
// Seed dir/lang/textDir inheritance from the root node's ancestors.
var _4d=_45&&_45.inherited;
if(!_4d){
function _4e(_4f,_50){
return (_4f.getAttribute&&_4f.getAttribute(_50))||(_4f!==_5.doc&&_4f!==_5.doc.documentElement&&_4f.parentNode?_4e(_4f.parentNode,_50):null);
};
_4d={dir:_4e(_46,"dir"),lang:_4e(_46,"lang"),textDir:_4e(_46,_4a)};
for(var key in _4d){
if(!_4d[key]){
delete _4d[key];
}
}
}
var _51={inherited:_4d};
var _52;
var _53;
// Lazily compute the inherited dir/lang/textDir for a parse-tree entry,
// walking up through its parent entries.
function _54(_55){
if(!_55.inherited){
_55.inherited={};
var _56=_55.node,_57=_54(_55.parent);
var _58={dir:_56.getAttribute("dir")||_57.dir,lang:_56.getAttribute("lang")||_57.lang,textDir:_56.getAttribute(_4a)||_57.textDir};
for(var key in _58){
if(_58[key]){
_55.inherited[key]=_58[key];
}
}
}
return _55.inherited;
};
// Iterative depth-first DOM walk (no recursion).
while(true){
if(!_4c){
// Finished this level: pop back up to the parent's next sibling.
if(!_51||!_51.node){
break;
}
_4c=_51.node.nextSibling;
_52=_51.scripts;
_53=false;
_51=_51.parent;
continue;
}
if(_4c.nodeType!=1){
_4c=_4c.nextSibling;
continue;
}
// Collect declarative <script type="dojo/..."> children of a widget.
if(_52&&_4c.nodeName.toLowerCase()=="script"){
_59=_4c.getAttribute("type");
if(_59&&/^dojo\/\w/i.test(_59)){
_52.push(_4c);
}
_4c=_4c.nextSibling;
continue;
}
if(_53){
// Inside a widget with stopParser set: skip the whole subtree.
_4c=_4c.nextSibling;
continue;
}
var _59=_4c.getAttribute(_49)||_4c.getAttribute(_47);
var _5a=_4c.firstChild;
if(!_59&&(!_5a||(_5a.nodeType==3&&!_5a.nextSibling))){
// Not a widget and no element children: nothing to do here.
_4c=_4c.nextSibling;
continue;
}
var _5b={node:_4c,scripts:_52,parent:_51};
var _5c=_59&&(_13[_59]||(_13[_59]=_2.getObject(_59))),_5d=_5c&&!_5c.prototype._noScript?[]:null;
if(_59){
_4b.push({"type":_59,node:_4c,scripts:_5d,inherited:_54(_5b)});
}
// Descend into this node's children.
_4c=_5a;
_52=_5d;
_53=_5c&&_5c.prototype.stopParser&&!(_45&&_45.template);
_51=_5b;
}
var _5e=_45&&_45.template?{template:true}:null;
return this.instantiate(_4b,_5e,_45);
};
}();
if(_1.config.parseOnLoad){
_1.ready(100,_1.parser,"parse");
}
return _1.parser;
}); | PypiClean |
// DVDev-0.1.3/dvdev/public/js/development-bundle/ui/ui.datepicker.js
(function($) { // hide the namespace
$.extend($.ui, { datepicker: { version: "1.7" } });
var PROP_NAME = 'datepicker';
/* Date picker manager.
   Use the singleton instance of this class, $.datepicker, to interact with the date picker.
   Settings for (groups of) date pickers are maintained in an instance object,
   allowing multiple different settings on the same page. */
function Datepicker() {
	this.debug = false; // Change this to true to start debugging
	this._curInst = null; // The current instance in use
	this._keyEvent = false; // If the last event was a key event
	this._disabledInputs = []; // List of date picker inputs that have been disabled
	this._datepickerShowing = false; // True if the popup picker is showing , false if not
	this._inDialog = false; // True if showing within a "dialog", false if not
	this._mainDivId = 'ui-datepicker-div'; // The ID of the main datepicker division
	this._inlineClass = 'ui-datepicker-inline'; // The name of the inline marker class
	this._appendClass = 'ui-datepicker-append'; // The name of the append marker class
	this._triggerClass = 'ui-datepicker-trigger'; // The name of the trigger marker class
	this._dialogClass = 'ui-datepicker-dialog'; // The name of the dialog marker class
	this._disableClass = 'ui-datepicker-disabled'; // The name of the disabled covering marker class
	this._unselectableClass = 'ui-datepicker-unselectable'; // The name of the unselectable cell marker class
	this._currentClass = 'ui-datepicker-current-day'; // The name of the current day marker class
	this._dayOverClass = 'ui-datepicker-days-cell-over'; // The name of the day hover marker class
	this.regional = []; // Available regional settings, indexed by language code
	this.regional[''] = { // Default regional settings (English); merged into _defaults below
		closeText: 'Done', // Display text for close link
		prevText: 'Prev', // Display text for previous month link
		nextText: 'Next', // Display text for next month link
		currentText: 'Today', // Display text for current month link
		monthNames: ['January','February','March','April','May','June',
			'July','August','September','October','November','December'], // Names of months for drop-down and formatting
		monthNamesShort: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'], // For formatting
		dayNames: ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'], // For formatting
		dayNamesShort: ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'], // For formatting
		dayNamesMin: ['Su','Mo','Tu','We','Th','Fr','Sa'], // Column headings for days starting at Sunday
		dateFormat: 'mm/dd/yy', // See format options on parseDate
		firstDay: 0, // The first day of the week, Sun = 0, Mon = 1, ...
		isRTL: false // True if right-to-left language, false if left-to-right
	};
	this._defaults = { // Global defaults for all the date picker instances
		showOn: 'focus', // 'focus' for popup on focus,
			// 'button' for trigger button, or 'both' for either
		showAnim: 'show', // Name of jQuery animation for popup
		showOptions: {}, // Options for enhanced animations
		defaultDate: null, // Used when field is blank: actual date,
			// +/-number for offset from today, null for today
		appendText: '', // Display text following the input box, e.g. showing the format
		buttonText: '...', // Text for trigger button
		buttonImage: '', // URL for trigger button image
		buttonImageOnly: false, // True if the image appears alone, false if it appears on a button
		hideIfNoPrevNext: false, // True to hide next/previous month links
			// if not applicable, false to just disable them
		navigationAsDateFormat: false, // True if date formatting applied to prev/today/next links
		gotoCurrent: false, // True if today link goes back to current selection instead
		changeMonth: false, // True if month can be selected directly, false if only prev/next
		changeYear: false, // True if year can be selected directly, false if only prev/next
		showMonthAfterYear: false, // True if the year select precedes month, false for month then year
		yearRange: '-10:+10', // Range of years to display in drop-down,
			// either relative to current year (-nn:+nn) or absolute (nnnn:nnnn)
		showOtherMonths: false, // True to show dates in other months, false to leave blank
		calculateWeek: this.iso8601Week, // How to calculate the week of the year,
			// takes a Date and returns the number of the week for it
		shortYearCutoff: '+10', // Short year values < this are in the current century,
			// > this are in the previous century,
			// string value starting with '+' for current year + value
		minDate: null, // The earliest selectable date, or null for no limit
		maxDate: null, // The latest selectable date, or null for no limit
		duration: 'normal', // Duration of display/closure
		beforeShowDay: null, // Function that takes a date and returns an array with
			// [0] = true if selectable, false if not, [1] = custom CSS class name(s) or '',
			// [2] = cell title (optional), e.g. $.datepicker.noWeekends
		beforeShow: null, // Function that takes an input field and
			// returns a set of custom settings for the date picker
		onSelect: null, // Define a callback function when a date is selected
		onChangeMonthYear: null, // Define a callback function when the month or year is changed
		onClose: null, // Define a callback function when the datepicker is closed
		numberOfMonths: 1, // Number of months to show at a time
		showCurrentAtPos: 0, // The position in multipe months at which to show the current month (starting at 0)
		stepMonths: 1, // Number of months to step back/forward
		stepBigMonths: 12, // Number of months to step back/forward for the big links
		altField: '', // Selector for an alternate field to store selected dates into
		altFormat: '', // The date format to use for the alternate field
		constrainInput: true, // The input is constrained by the current date format
		showButtonPanel: false // True to show button panel, false to not show it
	};
	// defaults = global settings + default (English) localisation
	$.extend(this._defaults, this.regional['']);
	// the single shared popup division, appended to the document on first show
	this.dpDiv = $('<div id="' + this._mainDivId + '" class="ui-datepicker ui-widget ui-widget-content ui-helper-clearfix ui-corner-all ui-helper-hidden-accessible"></div>');
}
$.extend(Datepicker.prototype, {
/* Class name added to elements to indicate already configured with a date picker. */
markerClassName: 'hasDatepicker',
/* Debug logging (if enabled). */
log: function () {
if (this.debug)
console.log.apply('', arguments);
},
/* Override the default settings for all instances of the date picker.
   @param settings object - the new settings to use as defaults (anonymous object)
   @return the manager object (for chaining) */
setDefaults: function(settings) {
	extendRemove(this._defaults, settings || {});
	return this;
},
/* Attach the date picker to a jQuery selection.
   @param target element - the target input field or division or span
   @param settings object - the new settings to use for this date picker instance (anonymous) */
_attachDatepicker: function(target, settings) {
	// check for settings on the control itself - in namespace 'date:'
	var inlineSettings = null;
	for (var attrName in this._defaults) {
		var attrValue = target.getAttribute('date:' + attrName);
		if (attrValue) {
			inlineSettings = inlineSettings || {};
			try {
				// SECURITY NOTE(review): attribute values are eval'd as
				// JavaScript; only acceptable because the markup is assumed
				// to be author-controlled, never user-supplied.
				inlineSettings[attrName] = eval(attrValue);
			} catch (err) {
				// not valid JS - keep the raw attribute string as the value
				inlineSettings[attrName] = attrValue;
			}
		}
	}
	var nodeName = target.nodeName.toLowerCase();
	// div/span hosts get an always-visible inline calendar
	var inline = (nodeName == 'div' || nodeName == 'span');
	// ensure the element has an id (needed to locate it again later)
	if (!target.id)
		target.id = 'dp' + (++this.uuid);
	var inst = this._newInst($(target), inline);
	inst.settings = $.extend({}, settings || {}, inlineSettings || {});
	if (nodeName == 'input') {
		this._connectDatepicker(target, inst);
	} else if (inline) {
		this._inlineDatepicker(target, inst);
	}
},
/* Create a new instance object (plain record, not a Datepicker). */
_newInst: function(target, inline) {
	// NOTE(review): the doubled backslash escape survives a later round of
	// string interpretation when the id is embedded into a selector —
	// confirm before "simplifying" it.
	var id = target[0].id.replace(/([:\[\]\.])/g, '\\\\$1'); // escape jQuery meta chars
	return {id: id, input: target, // associated target
		selectedDay: 0, selectedMonth: 0, selectedYear: 0, // current selection
		drawMonth: 0, drawYear: 0, // month being drawn
		inline: inline, // is datepicker inline or not
		dpDiv: (!inline ? this.dpDiv : // presentation div
		$('<div class="' + this._inlineClass + ' ui-datepicker ui-widget ui-widget-content ui-helper-clearfix ui-corner-all"></div>'))};
},
/* Attach the date picker to an input field: append text/trigger button as
   configured, hook focus/key events, and store the instance on the node. */
_connectDatepicker: function(target, inst) {
	var input = $(target);
	if (input.hasClass(this.markerClassName))
		return; // already set up as a datepicker
	var appendText = this._get(inst, 'appendText');
	var isRTL = this._get(inst, 'isRTL');
	if (appendText)
		input[isRTL ? 'before' : 'after']('<span class="' + this._appendClass + '">' + appendText + '</span>');
	var showOn = this._get(inst, 'showOn');
	if (showOn == 'focus' || showOn == 'both') // pop-up date picker when in the marked field
		input.focus(this._showDatepicker);
	if (showOn == 'button' || showOn == 'both') { // pop-up date picker when button clicked
		var buttonText = this._get(inst, 'buttonText');
		var buttonImage = this._get(inst, 'buttonImage');
		// trigger is either a bare <img> or a <button> (optionally with image)
		var trigger = $(this._get(inst, 'buttonImageOnly') ?
			$('<img/>').addClass(this._triggerClass).
				attr({ src: buttonImage, alt: buttonText, title: buttonText }) :
			$('<button type="button"></button>').addClass(this._triggerClass).
				html(buttonImage == '' ? buttonText : $('<img/>').attr(
				{ src:buttonImage, alt:buttonText, title:buttonText })));
		input[isRTL ? 'before' : 'after'](trigger);
		trigger.click(function() {
			// toggle: hide if this input's picker is showing, else show it
			if ($.datepicker._datepickerShowing && $.datepicker._lastInput == target)
				$.datepicker._hideDatepicker();
			else
				$.datepicker._showDatepicker(target);
			return false;
		});
	}
	input.addClass(this.markerClassName).keydown(this._doKeyDown).keypress(this._doKeyPress).
		bind("setData.datepicker", function(event, key, value) {
			inst.settings[key] = value;
		}).bind("getData.datepicker", function(event, key) {
			// NOTE(review): inside this handler 'this' is the DOM element,
			// not the Datepicker manager, so 'this._get' looks unresolved —
			// confirm upstream before relying on getData.datepicker.
			return this._get(inst, key);
		});
	$.data(target, PROP_NAME, inst);
},
/* Attach an inline (always-visible) date picker to a div or span. */
_inlineDatepicker: function(target, inst) {
	var divSpan = $(target);
	if (divSpan.hasClass(this.markerClassName))
		return; // already set up
	divSpan.addClass(this.markerClassName).append(inst.dpDiv).
		bind("setData.datepicker", function(event, key, value){
			inst.settings[key] = value;
		}).bind("getData.datepicker", function(event, key){
			// NOTE(review): 'this' is the DOM element here, not the manager —
			// 'this._get' looks unresolved; confirm upstream.
			return this._get(inst, key);
		});
	$.data(target, PROP_NAME, inst);
	// render the calendar immediately, showing the default date
	this._setDate(inst, this._getDefaultDate(inst));
	this._updateDatepicker(inst);
	this._updateAlternate(inst);
},
/* Pop-up the date picker in a "dialog" box.
   @param input element - ignored
   @param dateText string - the initial date to display (in the current format)
   @param onSelect function - the function(dateText) to call when a date is selected
   @param settings object - update the dialog date picker instance's settings (anonymous object)
   @param pos int[2] - coordinates for the dialog's position within the screen or
                 event - with x/y coordinates or
                 leave empty for default (screen centre)
   @return the manager object */
_dialogDatepicker: function(input, dateText, onSelect, settings, pos) {
	var inst = this._dialogInst; // internal instance
	if (!inst) {
		// first use: create the hidden off-screen input backing the dialog
		var id = 'dp' + (++this.uuid);
		this._dialogInput = $('<input type="text" id="' + id +
			'" size="1" style="position: absolute; top: -100px;"/>');
		this._dialogInput.keydown(this._doKeyDown);
		$('body').append(this._dialogInput);
		inst = this._dialogInst = this._newInst(this._dialogInput, false);
		inst.settings = {};
		$.data(this._dialogInput[0], PROP_NAME, inst);
	}
	extendRemove(inst.settings, settings || {});
	this._dialogInput.val(dateText);
	// position: explicit coords, event coords, or centred in the viewport
	this._pos = (pos ? (pos.length ? pos : [pos.pageX, pos.pageY]) : null);
	if (!this._pos) {
		var browserWidth = window.innerWidth || document.documentElement.clientWidth || document.body.clientWidth;
		var browserHeight = window.innerHeight || document.documentElement.clientHeight || document.body.clientHeight;
		var scrollX = document.documentElement.scrollLeft || document.body.scrollLeft;
		var scrollY = document.documentElement.scrollTop || document.body.scrollTop;
		this._pos = // should use actual width/height below
			[(browserWidth / 2) - 100 + scrollX, (browserHeight / 2) - 150 + scrollY];
	}
	// move input on screen for focus, but hidden behind dialog
	this._dialogInput.css('left', this._pos[0] + 'px').css('top', this._pos[1] + 'px');
	inst.settings.onSelect = onSelect;
	this._inDialog = true;
	this.dpDiv.addClass(this._dialogClass);
	this._showDatepicker(this._dialogInput[0]);
	if ($.blockUI)
		$.blockUI(this.dpDiv); // integrate with the blockUI plugin, if loaded
	$.data(this._dialogInput[0], PROP_NAME, inst);
	return this;
},
/* Detach a datepicker from its control: remove instance data, appended
   text, trigger button and all datepicker event handlers.
   @param target element - the target input field or division or span */
_destroyDatepicker: function(target) {
	var $target = $(target);
	if (!$target.hasClass(this.markerClassName)) {
		return; // not a datepicker
	}
	var nodeName = target.nodeName.toLowerCase();
	$.removeData(target, PROP_NAME);
	if (nodeName == 'input') {
		$target.siblings('.' + this._appendClass).remove().end().
			siblings('.' + this._triggerClass).remove().end().
			removeClass(this.markerClassName).
			unbind('focus', this._showDatepicker).
			unbind('keydown', this._doKeyDown).
			unbind('keypress', this._doKeyPress);
	} else if (nodeName == 'div' || nodeName == 'span')
		$target.removeClass(this.markerClassName).empty();
},
/* Enable the date picker to a jQuery selection.
   @param target element - the target input field or division or span */
_enableDatepicker: function(target) {
	var $target = $(target);
	if (!$target.hasClass(this.markerClassName)) {
		return;
	}
	var nodeName = target.nodeName.toLowerCase();
	if (nodeName == 'input') {
		target.disabled = false;
		// re-enable the trigger button and/or restore the trigger image
		$target.siblings('button.' + this._triggerClass).
			each(function() { this.disabled = false; }).end().
			siblings('img.' + this._triggerClass).
			css({opacity: '1.0', cursor: ''});
	}
	else if (nodeName == 'div' || nodeName == 'span') {
		var inline = $target.children('.' + this._inlineClass);
		inline.children().removeClass('ui-state-disabled');
	}
	// drop the control from the disabled-inputs registry
	this._disabledInputs = $.map(this._disabledInputs,
		function(value) { return (value == target ? null : value); }); // delete entry
},
/* Disable the date picker to a jQuery selection.
   @param target element - the target input field or division or span */
_disableDatepicker: function(target) {
	var $target = $(target);
	if (!$target.hasClass(this.markerClassName)) {
		return;
	}
	var nodeName = target.nodeName.toLowerCase();
	if (nodeName == 'input') {
		target.disabled = true;
		// disable the trigger button and/or grey out the trigger image
		$target.siblings('button.' + this._triggerClass).
			each(function() { this.disabled = true; }).end().
			siblings('img.' + this._triggerClass).
			css({opacity: '0.5', cursor: 'default'});
	}
	else if (nodeName == 'div' || nodeName == 'span') {
		var inline = $target.children('.' + this._inlineClass);
		inline.children().addClass('ui-state-disabled');
	}
	// filter-then-append keeps the registry free of duplicate entries
	this._disabledInputs = $.map(this._disabledInputs,
		function(value) { return (value == target ? null : value); }); // delete entry
	this._disabledInputs[this._disabledInputs.length] = target;
},
/* Is the first field in a jQuery collection disabled as a datepicker?
@param target element - the target input field or division or span
@return boolean - true if disabled, false if enabled */
_isDisabledDatepicker: function(target) {
if (!target) {
return false;
}
for (var i = 0; i < this._disabledInputs.length; i++) {
if (this._disabledInputs[i] == target)
return true;
}
return false;
},
/* Retrieve the instance data for the target control.
   @param target element - the target input field or division or span
   @return object - the associated instance data (may be undefined if none)
   @throws error if a jQuery problem getting data */
_getInst: function(target) {
	try {
		return $.data(target, PROP_NAME);
	}
	catch (err) {
		throw 'Missing instance data for this datepicker';
	}
},
/* Update the settings for a date picker attached to an input field or division.
   @param target element - the target input field or division or span
   @param name object - the new settings to update or
                 string - the name of the setting to change or
   @param value any - the new value for the setting (omit if above is an object) */
_optionDatepicker: function(target, name, value) {
	// normalise the (name, value) pair into a settings object
	var settings = name || {};
	if (typeof name == 'string') {
		settings = {};
		settings[name] = value;
	}
	var inst = this._getInst(target);
	if (inst) {
		if (this._curInst == inst) {
			this._hideDatepicker(null); // hide before changing settings
		}
		extendRemove(inst.settings, settings);
		// reset the working dates to today and redraw with the new settings
		var date = new Date();
		extendRemove(inst, {rangeStart: null, // start of range
			endDay: null, endMonth: null, endYear: null, // end of range
			selectedDay: date.getDate(), selectedMonth: date.getMonth(),
			selectedYear: date.getFullYear(), // starting point
			currentDay: date.getDate(), currentMonth: date.getMonth(),
			currentYear: date.getFullYear(), // current selection
			drawMonth: date.getMonth(), drawYear: date.getFullYear()}); // month being drawn
		this._updateDatepicker(inst);
	}
},
// change method deprecated -- retained only as an alias for _optionDatepicker
_changeDatepicker: function(target, name, value) {
	this._optionDatepicker(target, name, value);
},
/* Redraw the date picker attached to an input field or division.
@param target element - the target input field or division or span */
_refreshDatepicker: function(target) {
var inst = this._getInst(target);
if (inst) {
this._updateDatepicker(inst);
}
},
/* Set the dates for a jQuery selection.
@param target element - the target input field or division or span
@param date Date - the new date
@param endDate Date - the new end date for a range (optional) */
_setDateDatepicker: function(target, date, endDate) {
var inst = this._getInst(target);
if (inst) {
this._setDate(inst, date, endDate);
this._updateDatepicker(inst);
this._updateAlternate(inst);
}
},
/* Get the date(s) for the first entry in a jQuery selection.
@param target element - the target input field or division or span
@return Date - the current date or
Date[2] - the current dates for a range */
_getDateDatepicker: function(target) {
var inst = this._getInst(target);
if (inst && !inst.inline)
this._setDateFromField(inst);
return (inst ? this._getDate(inst) : null);
},
/* Handle keystrokes. */
_doKeyDown: function(event) {
// Keydown handler bound to the input element; goes through the
// $.datepicker singleton because 'this' is the DOM element here.
var inst = $.datepicker._getInst(event.target);
var handled = true;
var isRTL = inst.dpDiv.is('.ui-datepicker-rtl');
inst._keyEvent = true;
if ($.datepicker._datepickerShowing)
switch (event.keyCode) {
case 9: $.datepicker._hideDatepicker(null, '');
break; // hide on tab out
case 13: var sel = $('td.' + $.datepicker._dayOverClass +
', td.' + $.datepicker._currentClass, inst.dpDiv);
if (sel[0])
$.datepicker._selectDay(event.target, inst.selectedMonth, inst.selectedYear, sel[0]);
else
$.datepicker._hideDatepicker(null, $.datepicker._get(inst, 'duration'));
return false; // don't submit the form
break; // select the value on enter (unreachable after the return above)
case 27: $.datepicker._hideDatepicker(null, $.datepicker._get(inst, 'duration'));
break; // hide on escape
case 33: $.datepicker._adjustDate(event.target, (event.ctrlKey ?
-$.datepicker._get(inst, 'stepBigMonths') :
-$.datepicker._get(inst, 'stepMonths')), 'M');
break; // previous month/year on page up/+ ctrl
case 34: $.datepicker._adjustDate(event.target, (event.ctrlKey ?
+$.datepicker._get(inst, 'stepBigMonths') :
+$.datepicker._get(inst, 'stepMonths')), 'M');
break; // next month/year on page down/+ ctrl
case 35: if (event.ctrlKey || event.metaKey) $.datepicker._clearDate(event.target);
handled = event.ctrlKey || event.metaKey;
break; // clear on ctrl or command +end
case 36: if (event.ctrlKey || event.metaKey) $.datepicker._gotoToday(event.target);
handled = event.ctrlKey || event.metaKey;
break; // current on ctrl or command +home
case 37: if (event.ctrlKey || event.metaKey) $.datepicker._adjustDate(event.target, (isRTL ? +1 : -1), 'D');
handled = event.ctrlKey || event.metaKey;
// -1 day on ctrl or command +left
if (event.originalEvent.altKey) $.datepicker._adjustDate(event.target, (event.ctrlKey ?
-$.datepicker._get(inst, 'stepBigMonths') :
-$.datepicker._get(inst, 'stepMonths')), 'M');
// next month/year on alt +left on Mac
break;
case 38: if (event.ctrlKey || event.metaKey) $.datepicker._adjustDate(event.target, -7, 'D');
handled = event.ctrlKey || event.metaKey;
break; // -1 week on ctrl or command +up
case 39: if (event.ctrlKey || event.metaKey) $.datepicker._adjustDate(event.target, (isRTL ? -1 : +1), 'D');
handled = event.ctrlKey || event.metaKey;
// +1 day on ctrl or command +right
if (event.originalEvent.altKey) $.datepicker._adjustDate(event.target, (event.ctrlKey ?
+$.datepicker._get(inst, 'stepBigMonths') :
+$.datepicker._get(inst, 'stepMonths')), 'M');
// next month/year on alt +right
break;
case 40: if (event.ctrlKey || event.metaKey) $.datepicker._adjustDate(event.target, +7, 'D');
handled = event.ctrlKey || event.metaKey;
break; // +1 week on ctrl or command +down
default: handled = false;
}
else if (event.keyCode == 36 && event.ctrlKey) // display the date picker on ctrl+home
$.datepicker._showDatepicker(this);
else {
handled = false;
}
// Swallow the key only when a datepicker action consumed it.
if (handled) {
event.preventDefault();
event.stopPropagation();
}
},
/* Filter entered characters - based on date format. */
_doKeyPress: function(event) {
var inst = $.datepicker._getInst(event.target);
if ($.datepicker._get(inst, 'constrainInput')) {
var chars = $.datepicker._possibleChars($.datepicker._get(inst, 'dateFormat'));
var chr = String.fromCharCode(event.charCode == undefined ? event.keyCode : event.charCode);
return event.ctrlKey || (chr < ' ' || !chars || chars.indexOf(chr) > -1);
}
},
/* Pop-up the date picker for a given input field.
@param input element - the input field attached to the date picker or
event - if triggered by focus */
_showDatepicker: function(input) {
// 'input' is either the field itself or the triggering focus event.
input = input.target || input;
if (input.nodeName.toLowerCase() != 'input') // find from button/image trigger
input = $('input', input.parentNode)[0];
if ($.datepicker._isDisabledDatepicker(input) || $.datepicker._lastInput == input) // already here
return;
var inst = $.datepicker._getInst(input);
// Let the beforeShow callback adjust the settings before display.
var beforeShow = $.datepicker._get(inst, 'beforeShow');
extendRemove(inst.settings, (beforeShow ? beforeShow.apply(input, [input, inst]) : {}));
$.datepicker._hideDatepicker(null, '');
$.datepicker._lastInput = input;
$.datepicker._setDateFromField(inst);
if ($.datepicker._inDialog) // hide cursor
input.value = '';
if (!$.datepicker._pos) { // position below input
$.datepicker._pos = $.datepicker._findPos(input);
$.datepicker._pos[1] += input.offsetHeight; // add the height
}
var isFixed = false;
$(input).parents().each(function() {
// |= coerces isFixed to a number; only its truthiness is used below
isFixed |= $(this).css('position') == 'fixed';
return !isFixed;
});
if (isFixed && $.browser.opera) { // correction for Opera when fixed and scrolled
$.datepicker._pos[0] -= document.documentElement.scrollLeft;
$.datepicker._pos[1] -= document.documentElement.scrollTop;
}
var offset = {left: $.datepicker._pos[0], top: $.datepicker._pos[1]};
$.datepicker._pos = null;
inst.rangeStart = null;
// determine sizing offscreen
inst.dpDiv.css({position: 'absolute', display: 'block', top: '-1000px'});
$.datepicker._updateDatepicker(inst);
// fix width for dynamic number of date pickers
// and adjust position before showing
offset = $.datepicker._checkOffset(inst, offset, isFixed);
inst.dpDiv.css({position: ($.datepicker._inDialog && $.blockUI ?
'static' : (isFixed ? 'fixed' : 'absolute')), display: 'none',
left: offset.left + 'px', top: offset.top + 'px'});
if (!inst.inline) {
var showAnim = $.datepicker._get(inst, 'showAnim') || 'show';
var duration = $.datepicker._get(inst, 'duration');
var postProcess = function() {
$.datepicker._datepickerShowing = true;
if ($.browser.msie && parseInt($.browser.version,10) < 7) // fix IE < 7 select problems
$('iframe.ui-datepicker-cover').css({width: inst.dpDiv.width() + 4,
height: inst.dpDiv.height() + 4});
};
// Use a jQuery UI effect when available, else a plain jQuery animation.
if ($.effects && $.effects[showAnim])
inst.dpDiv.show(showAnim, $.datepicker._get(inst, 'showOptions'), duration, postProcess);
else
inst.dpDiv[showAnim](duration, postProcess);
if (duration == '')
postProcess();
if (inst.input[0].type != 'hidden')
inst.input[0].focus();
$.datepicker._curInst = inst;
}
},
/* Generate the date picker content. */
/* Generate the date picker content. */
_updateDatepicker: function(inst) {
// Capture the old size so the IE iframe cover can be resized to match.
var dims = {width: inst.dpDiv.width() + 4,
height: inst.dpDiv.height() + 4};
var self = this;
// Rebuild the HTML, then rebind hover handlers on the new elements.
inst.dpDiv.empty().append(this._generateHTML(inst))
.find('iframe.ui-datepicker-cover').
css({width: dims.width, height: dims.height})
.end()
.find('button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a')
.bind('mouseout', function(){
$(this).removeClass('ui-state-hover');
if(this.className.indexOf('ui-datepicker-prev') != -1) $(this).removeClass('ui-datepicker-prev-hover');
if(this.className.indexOf('ui-datepicker-next') != -1) $(this).removeClass('ui-datepicker-next-hover');
})
.bind('mouseover', function(){
// No hover feedback while the picker is disabled.
if (!self._isDisabledDatepicker( inst.inline ? inst.dpDiv.parent()[0] : inst.input[0])) {
$(this).parents('.ui-datepicker-calendar').find('a').removeClass('ui-state-hover');
$(this).addClass('ui-state-hover');
if(this.className.indexOf('ui-datepicker-prev') != -1) $(this).addClass('ui-datepicker-prev-hover');
if(this.className.indexOf('ui-datepicker-next') != -1) $(this).addClass('ui-datepicker-next-hover');
}
})
.end()
.find('.' + this._dayOverClass + ' a')
.trigger('mouseover')
.end();
// Size the container for multi-month layouts.
var numMonths = this._getNumberOfMonths(inst);
var cols = numMonths[1];
var width = 17; // em per month column — presumably matches the stylesheet; confirm
if (cols > 1) {
inst.dpDiv.addClass('ui-datepicker-multi-' + cols).css('width', (width * cols) + 'em');
} else {
inst.dpDiv.removeClass('ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4').width('');
}
inst.dpDiv[(numMonths[0] != 1 || numMonths[1] != 1 ? 'add' : 'remove') +
'Class']('ui-datepicker-multi');
inst.dpDiv[(this._get(inst, 'isRTL') ? 'add' : 'remove') +
'Class']('ui-datepicker-rtl');
// Restore focus to the input of the active picker after the rebuild.
if (inst.input && inst.input[0].type != 'hidden' && inst == $.datepicker._curInst)
$(inst.input[0]).focus();
},
/* Check positioning to remain on screen. */
/* Check positioning to remain on screen. */
_checkOffset: function(inst, offset, isFixed) {
var dpWidth = inst.dpDiv.outerWidth();
var dpHeight = inst.dpDiv.outerHeight();
var inputWidth = inst.input ? inst.input.outerWidth() : 0;
var inputHeight = inst.input ? inst.input.outerHeight() : 0;
// Viewport extent including the current scroll position.
var viewWidth = (window.innerWidth || document.documentElement.clientWidth || document.body.clientWidth) + $(document).scrollLeft();
var viewHeight = (window.innerHeight || document.documentElement.clientHeight || document.body.clientHeight) + $(document).scrollTop();
// Right-align to the input in RTL mode.
offset.left -= (this._get(inst, 'isRTL') ? (dpWidth - inputWidth) : 0);
// Fixed-position elements must not include the document scroll.
offset.left -= (isFixed && offset.left == inst.input.offset().left) ? $(document).scrollLeft() : 0;
offset.top -= (isFixed && offset.top == (inst.input.offset().top + inputHeight)) ? $(document).scrollTop() : 0;
// now check if datepicker is showing outside window viewport - move to a better place if so.
offset.left -= (offset.left + dpWidth > viewWidth && viewWidth > dpWidth) ? Math.abs(offset.left + dpWidth - viewWidth) : 0;
offset.top -= (offset.top + dpHeight > viewHeight && viewHeight > dpHeight) ? Math.abs(offset.top + dpHeight + inputHeight*2 - viewHeight) : 0;
return offset;
},
/* Find an object's position on the screen. */
_findPos: function(obj) {
while (obj && (obj.type == 'hidden' || obj.nodeType != 1)) {
obj = obj.nextSibling;
}
var position = $(obj).offset();
return [position.left, position.top];
},
/* Hide the date picker from view.
@param input element - the input field attached to the date picker
@param duration string - the duration over which to close the date picker */
_hideDatepicker: function(input, duration) {
var inst = this._curInst;
// Nothing showing, or the request is for a different picker's input.
if (!inst || (input && inst != $.data(input, PROP_NAME)))
return;
// A pending range selection is committed before closing.
if (inst.stayOpen)
this._selectDate('#' + inst.id, this._formatDate(inst,
inst.currentDay, inst.currentMonth, inst.currentYear));
inst.stayOpen = false;
if (this._datepickerShowing) {
duration = (duration != null ? duration : this._get(inst, 'duration'));
var showAnim = this._get(inst, 'showAnim');
var postProcess = function() {
$.datepicker._tidyDialog(inst);
};
// Reverse the show animation; '' hides immediately.
if (duration != '' && $.effects && $.effects[showAnim])
inst.dpDiv.hide(showAnim, $.datepicker._get(inst, 'showOptions'),
duration, postProcess);
else
inst.dpDiv[(duration == '' ? 'hide' : (showAnim == 'slideDown' ? 'slideUp' :
(showAnim == 'fadeIn' ? 'fadeOut' : 'hide')))](duration, postProcess);
if (duration == '')
this._tidyDialog(inst);
var onClose = this._get(inst, 'onClose');
if (onClose)
onClose.apply((inst.input ? inst.input[0] : null),
[(inst.input ? inst.input.val() : ''), inst]); // trigger custom callback
this._datepickerShowing = false;
this._lastInput = null;
// Dialog mode: park the helper input offscreen and release blockUI.
if (this._inDialog) {
this._dialogInput.css({ position: 'absolute', left: '0', top: '-100px' });
if ($.blockUI) {
$.unblockUI();
$('body').append(this.dpDiv);
}
}
this._inDialog = false;
}
this._curInst = null;
},
/* Tidy up after a dialog display. */
_tidyDialog: function(inst) {
inst.dpDiv.removeClass(this._dialogClass).unbind('.ui-datepicker-calendar');
},
/* Close date picker if clicked elsewhere. */
_checkExternalClick: function(event) {
if (!$.datepicker._curInst)
return;
var $target = $(event.target);
if (($target.parents('#' + $.datepicker._mainDivId).length == 0) &&
!$target.hasClass($.datepicker.markerClassName) &&
!$target.hasClass($.datepicker._triggerClass) &&
$.datepicker._datepickerShowing && !($.datepicker._inDialog && $.blockUI))
$.datepicker._hideDatepicker(null, '');
},
/* Adjust one of the date sub-fields. */
_adjustDate: function(id, offset, period) {
var target = $(id);
var inst = this._getInst(target[0]);
if (this._isDisabledDatepicker(target[0])) {
return;
}
this._adjustInstDate(inst, offset +
(period == 'M' ? this._get(inst, 'showCurrentAtPos') : 0), // undo positioning
period);
this._updateDatepicker(inst);
},
/* Action for current link. */
_gotoToday: function(id) {
var target = $(id);
var inst = this._getInst(target[0]);
if (this._get(inst, 'gotoCurrent') && inst.currentDay) {
inst.selectedDay = inst.currentDay;
inst.drawMonth = inst.selectedMonth = inst.currentMonth;
inst.drawYear = inst.selectedYear = inst.currentYear;
}
else {
var date = new Date();
inst.selectedDay = date.getDate();
inst.drawMonth = inst.selectedMonth = date.getMonth();
inst.drawYear = inst.selectedYear = date.getFullYear();
}
this._notifyChange(inst);
this._adjustDate(target);
},
/* Action for selecting a new month/year. */
_selectMonthYear: function(id, select, period) {
var target = $(id);
var inst = this._getInst(target[0]);
inst._selectingMonthYear = false;
inst['selected' + (period == 'M' ? 'Month' : 'Year')] =
inst['draw' + (period == 'M' ? 'Month' : 'Year')] =
parseInt(select.options[select.selectedIndex].value,10);
this._notifyChange(inst);
this._adjustDate(target);
},
/* Restore input focus after not changing month/year. */
_clickMonthYear: function(id) {
var target = $(id);
var inst = this._getInst(target[0]);
if (inst.input && inst._selectingMonthYear && !$.browser.msie)
inst.input[0].focus();
inst._selectingMonthYear = !inst._selectingMonthYear;
},
/* Action for selecting a day. */
_selectDay: function(id, month, year, td) {
var target = $(id);
if ($(td).hasClass(this._unselectableClass) || this._isDisabledDatepicker(target[0])) {
return;
}
var inst = this._getInst(target[0]);
inst.selectedDay = inst.currentDay = $('a', td).html();
inst.selectedMonth = inst.currentMonth = month;
inst.selectedYear = inst.currentYear = year;
if (inst.stayOpen) {
inst.endDay = inst.endMonth = inst.endYear = null;
}
this._selectDate(id, this._formatDate(inst,
inst.currentDay, inst.currentMonth, inst.currentYear));
if (inst.stayOpen) {
inst.rangeStart = this._daylightSavingAdjust(
new Date(inst.currentYear, inst.currentMonth, inst.currentDay));
this._updateDatepicker(inst);
}
},
/* Erase the input field and hide the date picker. */
_clearDate: function(id) {
var target = $(id);
var inst = this._getInst(target[0]);
inst.stayOpen = false;
inst.endDay = inst.endMonth = inst.endYear = inst.rangeStart = null;
this._selectDate(target, '');
},
/* Update the input field with the selected date. */
/* Update the input field with the selected date. */
_selectDate: function(id, dateStr) {
var target = $(id);
var inst = this._getInst(target[0]);
dateStr = (dateStr != null ? dateStr : this._formatDate(inst));
if (inst.input)
inst.input.val(dateStr);
this._updateAlternate(inst);
// onSelect replaces the default change event, not supplements it.
var onSelect = this._get(inst, 'onSelect');
if (onSelect)
onSelect.apply((inst.input ? inst.input[0] : null), [dateStr, inst]); // trigger custom callback
else if (inst.input)
inst.input.trigger('change'); // fire the change event
if (inst.inline)
this._updateDatepicker(inst);
else if (!inst.stayOpen) {
this._hideDatepicker(null, this._get(inst, 'duration'));
// _lastInput is set around the focus call so _showDatepicker's
// "already here" guard suppresses an immediate re-open.
this._lastInput = inst.input[0];
if (typeof(inst.input[0]) != 'object')
inst.input[0].focus(); // restore focus
this._lastInput = null;
}
},
/* Update any alternate field to synchronise with the main field. */
_updateAlternate: function(inst) {
var altField = this._get(inst, 'altField');
if (altField) { // update alternate field too
var altFormat = this._get(inst, 'altFormat') || this._get(inst, 'dateFormat');
var date = this._getDate(inst);
dateStr = this.formatDate(altFormat, date, this._getFormatConfig(inst));
$(altField).each(function() { $(this).val(dateStr); });
}
},
/* Set as beforeShowDay function to prevent selection of weekends.
@param date Date - the date to customise
@return [boolean, string] - is this date selectable?, what is its CSS class? */
noWeekends: function(date) {
var day = date.getDay();
return [(day > 0 && day < 6), ''];
},
/* Set as calculateWeek to determine the week of the year based on the ISO 8601 definition.
@param date Date - the date to get the week for
@return number - the number of the week within the year that contains this date */
iso8601Week: function(date) {
// Work on a time-free copy so comparisons are date-only.
var checkDate = new Date(date.getFullYear(), date.getMonth(), date.getDate());
var firstMon = new Date(checkDate.getFullYear(), 1 - 1, 4); // First week always contains 4 Jan
var firstDay = firstMon.getDay() || 7; // Day of week: Mon = 1, ..., Sun = 7
firstMon.setDate(firstMon.getDate() + 1 - firstDay); // Preceding Monday
if (firstDay < 4 && checkDate < firstMon) { // Adjust first three days in year if necessary
checkDate.setDate(checkDate.getDate() - 3); // Generate for previous year
return $.datepicker.iso8601Week(checkDate);
} else if (checkDate > new Date(checkDate.getFullYear(), 12 - 1, 28)) { // Check last three days in year
firstDay = new Date(checkDate.getFullYear() + 1, 1 - 1, 4).getDay() || 7;
if (firstDay > 4 && (checkDate.getDay() || 7) < firstDay - 3) { // Adjust if necessary
return 1; // date falls in week 1 of the following year
}
}
return Math.floor(((checkDate - firstMon) / 86400000) / 7) + 1; // Weeks to given date
},
/* Parse a string value into a date object.
See formatDate below for the possible formats.
@param format string - the expected format of the date
@param value string - the date in the above format
@param settings Object - attributes include:
shortYearCutoff number - the cutoff year for determining the century (optional)
dayNamesShort string[7] - abbreviated names of the days from Sunday (optional)
dayNames string[7] - names of the days from Sunday (optional)
monthNamesShort string[12] - abbreviated names of the months (optional)
monthNames string[12] - names of the months (optional)
@return Date - the extracted date value or null if value is blank */
parseDate: function (format, value, settings) {
if (format == null || value == null)
throw 'Invalid arguments';
value = (typeof value == 'object' ? value.toString() : value + '');
if (value == '')
return null;
// Per-call settings fall back to the global defaults.
var shortYearCutoff = (settings ? settings.shortYearCutoff : null) || this._defaults.shortYearCutoff;
var dayNamesShort = (settings ? settings.dayNamesShort : null) || this._defaults.dayNamesShort;
var dayNames = (settings ? settings.dayNames : null) || this._defaults.dayNames;
var monthNamesShort = (settings ? settings.monthNamesShort : null) || this._defaults.monthNamesShort;
var monthNames = (settings ? settings.monthNames : null) || this._defaults.monthNames;
var year = -1;
var month = -1;
var day = -1;
var doy = -1;
var literal = false;
// Check whether a format character is doubled
// (the helpers below share iFormat/iValue with the main loop)
var lookAhead = function(match) {
var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) == match);
if (matches)
iFormat++;
return matches;
};
// Extract a number from the string value
var getNumber = function(match) {
lookAhead(match);
var origSize = (match == '@' ? 14 : (match == 'y' ? 4 : (match == 'o' ? 3 : 2)));
var size = origSize;
var num = 0;
while (size > 0 && iValue < value.length &&
value.charAt(iValue) >= '0' && value.charAt(iValue) <= '9') {
num = num * 10 + parseInt(value.charAt(iValue++),10);
size--;
}
if (size == origSize)
throw 'Missing number at position ' + iValue;
return num;
};
// Extract a name from the string value and convert to an index
var getName = function(match, shortNames, longNames) {
var names = (lookAhead(match) ? longNames : shortNames);
var size = 0;
for (var j = 0; j < names.length; j++)
size = Math.max(size, names[j].length);
var name = '';
var iInit = iValue;
while (size > 0 && iValue < value.length) {
name += value.charAt(iValue++);
for (var i = 0; i < names.length; i++)
if (name == names[i])
return i + 1;
size--;
}
throw 'Unknown name at position ' + iInit;
};
// Confirm that a literal character matches the string value
var checkLiteral = function() {
if (value.charAt(iValue) != format.charAt(iFormat))
throw 'Unexpected literal at position ' + iValue;
iValue++;
};
// Scan the format and the value in lockstep.
var iValue = 0;
for (var iFormat = 0; iFormat < format.length; iFormat++) {
if (literal)
if (format.charAt(iFormat) == "'" && !lookAhead("'"))
literal = false;
else
checkLiteral();
else
switch (format.charAt(iFormat)) {
case 'd':
day = getNumber('d');
break;
case 'D':
getName('D', dayNamesShort, dayNames);
break;
case 'o':
doy = getNumber('o');
break;
case 'm':
month = getNumber('m');
break;
case 'M':
month = getName('M', monthNamesShort, monthNames);
break;
case 'y':
year = getNumber('y');
break;
case '@':
var date = new Date(getNumber('@'));
year = date.getFullYear();
month = date.getMonth() + 1;
day = date.getDate();
break;
case "'":
if (lookAhead("'"))
checkLiteral();
else
literal = true;
break;
default:
checkLiteral();
}
}
// Two-digit years pivot around shortYearCutoff within the current century.
if (year == -1)
year = new Date().getFullYear();
else if (year < 100)
year += new Date().getFullYear() - new Date().getFullYear() % 100 +
(year <= shortYearCutoff ? 0 : -100);
// Convert a day-of-year value into month/day.
if (doy > -1) {
month = 1;
day = doy;
do {
var dim = this._getDaysInMonth(year, month - 1);
if (day <= dim)
break;
month++;
day -= dim;
} while (true);
}
// Round-trip through Date to reject impossible dates like 31/02.
var date = this._daylightSavingAdjust(new Date(year, month - 1, day));
if (date.getFullYear() != year || date.getMonth() + 1 != month || date.getDate() != day)
throw 'Invalid date'; // E.g. 31/02/*
return date;
},
/* Standard date formats. */
ATOM: 'yy-mm-dd', // RFC 3339 (ISO 8601)
COOKIE: 'D, dd M yy', // HTTP cookie expiry style
ISO_8601: 'yy-mm-dd', // international standard
RFC_822: 'D, d M y', // two-digit year
RFC_850: 'DD, dd-M-y', // long day name, hyphenated
RFC_1036: 'D, d M y', // two-digit year
RFC_1123: 'D, d M yy', // four-digit year
RFC_2822: 'D, d M yy', // four-digit year
RSS: 'D, d M y', // RFC 822
TIMESTAMP: '@', // Unix timestamp in ms
W3C: 'yy-mm-dd', // ISO 8601
/* Format a date object into a string value.
The format can be combinations of the following:
d - day of month (no leading zero)
dd - day of month (two digit)
o - day of year (no leading zeros)
oo - day of year (three digit)
D - day name short
DD - day name long
m - month of year (no leading zero)
mm - month of year (two digit)
M - month name short
MM - month name long
y - year (two digit)
yy - year (four digit)
@ - Unix timestamp (ms since 01/01/1970)
'...' - literal text
'' - single quote
@param format string - the desired format of the date
@param date Date - the date value to format
@param settings Object - attributes include:
dayNamesShort string[7] - abbreviated names of the days from Sunday (optional)
dayNames string[7] - names of the days from Sunday (optional)
monthNamesShort string[12] - abbreviated names of the months (optional)
monthNames string[12] - names of the months (optional)
@return string - the date in the above format */
formatDate: function (format, date, settings) {
if (!date)
return '';
// Per-call settings fall back to the global defaults.
var dayNamesShort = (settings ? settings.dayNamesShort : null) || this._defaults.dayNamesShort;
var dayNames = (settings ? settings.dayNames : null) || this._defaults.dayNames;
var monthNamesShort = (settings ? settings.monthNamesShort : null) || this._defaults.monthNamesShort;
var monthNames = (settings ? settings.monthNames : null) || this._defaults.monthNames;
// Check whether a format character is doubled
// (shares iFormat with the main loop below)
var lookAhead = function(match) {
var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) == match);
if (matches)
iFormat++;
return matches;
};
// Format a number, with leading zero if necessary
var formatNumber = function(match, value, len) {
var num = '' + value;
if (lookAhead(match))
while (num.length < len)
num = '0' + num;
return num;
};
// Format a name, short or long as requested
var formatName = function(match, value, shortNames, longNames) {
return (lookAhead(match) ? longNames[value] : shortNames[value]);
};
var output = '';
var literal = false;
if (date)
for (var iFormat = 0; iFormat < format.length; iFormat++) {
if (literal)
if (format.charAt(iFormat) == "'" && !lookAhead("'"))
literal = false;
else
output += format.charAt(iFormat);
else
switch (format.charAt(iFormat)) {
case 'd':
output += formatNumber('d', date.getDate(), 2);
break;
case 'D':
output += formatName('D', date.getDay(), dayNamesShort, dayNames);
break;
case 'o':
// Day of year = day of month plus lengths of all earlier months.
var doy = date.getDate();
for (var m = date.getMonth() - 1; m >= 0; m--)
doy += this._getDaysInMonth(date.getFullYear(), m);
output += formatNumber('o', doy, 3);
break;
case 'm':
output += formatNumber('m', date.getMonth() + 1, 2);
break;
case 'M':
output += formatName('M', date.getMonth(), monthNamesShort, monthNames);
break;
case 'y':
// NOTE(review): getYear() is deprecated; the % 100 keeps the
// two-digit output correct for modern years — confirm for pre-1900.
output += (lookAhead('y') ? date.getFullYear() :
(date.getYear() % 100 < 10 ? '0' : '') + date.getYear() % 100);
break;
case '@':
output += date.getTime();
break;
case "'":
if (lookAhead("'"))
output += "'";
else
literal = true;
break;
default:
output += format.charAt(iFormat);
}
}
return output;
},
/* Extract all possible characters from the date format. */
_possibleChars: function (format) {
var chars = '';
var literal = false;
for (var iFormat = 0; iFormat < format.length; iFormat++)
if (literal)
if (format.charAt(iFormat) == "'" && !lookAhead("'"))
literal = false;
else
chars += format.charAt(iFormat);
else
switch (format.charAt(iFormat)) {
case 'd': case 'm': case 'y': case '@':
chars += '0123456789';
break;
case 'D': case 'M':
return null; // Accept anything
case "'":
if (lookAhead("'"))
chars += "'";
else
literal = true;
break;
default:
chars += format.charAt(iFormat);
}
return chars;
},
/* Get a setting value, defaulting if necessary. */
_get: function(inst, name) {
return inst.settings[name] !== undefined ?
inst.settings[name] : this._defaults[name];
},
/* Parse existing date and initialise date picker. */
_setDateFromField: function(inst) {
var dateFormat = this._get(inst, 'dateFormat');
var dates = inst.input ? inst.input.val() : null;
inst.endDay = inst.endMonth = inst.endYear = null;
var date = defaultDate = this._getDefaultDate(inst);
var settings = this._getFormatConfig(inst);
try {
date = this.parseDate(dateFormat, dates, settings) || defaultDate;
} catch (event) {
this.log(event);
date = defaultDate;
}
inst.selectedDay = date.getDate();
inst.drawMonth = inst.selectedMonth = date.getMonth();
inst.drawYear = inst.selectedYear = date.getFullYear();
inst.currentDay = (dates ? date.getDate() : 0);
inst.currentMonth = (dates ? date.getMonth() : 0);
inst.currentYear = (dates ? date.getFullYear() : 0);
this._adjustInstDate(inst);
},
/* Retrieve the default date shown on opening. */
_getDefaultDate: function(inst) {
var date = this._determineDate(this._get(inst, 'defaultDate'), new Date());
var minDate = this._getMinMaxDate(inst, 'min', true);
var maxDate = this._getMinMaxDate(inst, 'max');
date = (minDate && date < minDate ? minDate : date);
date = (maxDate && date > maxDate ? maxDate : date);
return date;
},
/* A date may be specified as an exact value or a relative one. */
/* A date may be specified as an exact value or a relative one. */
_determineDate: function(date, defaultDate) {
// Numeric offset: that many days relative to today.
var offsetNumeric = function(offset) {
var date = new Date();
date.setDate(date.getDate() + offset);
return date;
};
// String offset: units relative to today, e.g. '+1m +7d'.
var offsetString = function(offset, getDaysInMonth) {
var date = new Date();
var year = date.getFullYear();
var month = date.getMonth();
var day = date.getDate();
var pattern = /([+-]?[0-9]+)\s*(d|D|w|W|m|M|y|Y)?/g;
var matches = pattern.exec(offset);
while (matches) {
switch (matches[2] || 'd') {
case 'd' : case 'D' :
day += parseInt(matches[1],10); break;
case 'w' : case 'W' :
day += parseInt(matches[1],10) * 7; break;
case 'm' : case 'M' :
month += parseInt(matches[1],10);
// Keep the day within the target month's length.
day = Math.min(day, getDaysInMonth(year, month));
break;
case 'y': case 'Y' :
year += parseInt(matches[1],10);
day = Math.min(day, getDaysInMonth(year, month));
break;
}
matches = pattern.exec(offset);
}
return new Date(year, month, day);
};
date = (date == null ? defaultDate :
(typeof date == 'string' ? offsetString(date, this._getDaysInMonth) :
(typeof date == 'number' ? (isNaN(date) ? defaultDate : offsetNumeric(date)) : date)));
date = (date && date.toString() == 'Invalid Date' ? defaultDate : date);
// Normalise to midnight before the daylight-saving correction.
if (date) {
date.setHours(0);
date.setMinutes(0);
date.setSeconds(0);
date.setMilliseconds(0);
}
return this._daylightSavingAdjust(date);
},
/* Handle switch to/from daylight saving.
Hours may be non-zero on daylight saving cut-over:
> 12 when midnight changeover, but then cannot generate
midnight datetime, so jump to 1AM, otherwise reset.
@param date (Date) the date to check
@return (Date) the corrected date */
_daylightSavingAdjust: function(date) {
if (!date) return null;
date.setHours(date.getHours() > 12 ? date.getHours() + 2 : 0);
return date;
},
/* Set the date(s) directly. */
_setDate: function(inst, date, endDate) {
var clear = !(date);
var origMonth = inst.selectedMonth;
var origYear = inst.selectedYear;
date = this._determineDate(date, new Date());
inst.selectedDay = inst.currentDay = date.getDate();
inst.drawMonth = inst.selectedMonth = inst.currentMonth = date.getMonth();
inst.drawYear = inst.selectedYear = inst.currentYear = date.getFullYear();
if (origMonth != inst.selectedMonth || origYear != inst.selectedYear)
this._notifyChange(inst);
this._adjustInstDate(inst);
if (inst.input) {
inst.input.val(clear ? '' : this._formatDate(inst));
}
},
/* Retrieve the date(s) directly. */
_getDate: function(inst) {
var startDate = (!inst.currentYear || (inst.input && inst.input.val() == '') ? null :
this._daylightSavingAdjust(new Date(
inst.currentYear, inst.currentMonth, inst.currentDay)));
return startDate;
},
/* Generate the HTML for the current state of the date picker. */
_generateHTML: function(inst) {
var today = new Date();
today = this._daylightSavingAdjust(
new Date(today.getFullYear(), today.getMonth(), today.getDate())); // clear time
var isRTL = this._get(inst, 'isRTL');
var showButtonPanel = this._get(inst, 'showButtonPanel');
var hideIfNoPrevNext = this._get(inst, 'hideIfNoPrevNext');
var navigationAsDateFormat = this._get(inst, 'navigationAsDateFormat');
var numMonths = this._getNumberOfMonths(inst);
var showCurrentAtPos = this._get(inst, 'showCurrentAtPos');
var stepMonths = this._get(inst, 'stepMonths');
var stepBigMonths = this._get(inst, 'stepBigMonths');
var isMultiMonth = (numMonths[0] != 1 || numMonths[1] != 1);
var currentDate = this._daylightSavingAdjust((!inst.currentDay ? new Date(9999, 9, 9) :
new Date(inst.currentYear, inst.currentMonth, inst.currentDay)));
var minDate = this._getMinMaxDate(inst, 'min', true);
var maxDate = this._getMinMaxDate(inst, 'max');
var drawMonth = inst.drawMonth - showCurrentAtPos;
var drawYear = inst.drawYear;
if (drawMonth < 0) {
drawMonth += 12;
drawYear--;
}
if (maxDate) {
var maxDraw = this._daylightSavingAdjust(new Date(maxDate.getFullYear(),
maxDate.getMonth() - numMonths[1] + 1, maxDate.getDate()));
maxDraw = (minDate && maxDraw < minDate ? minDate : maxDraw);
while (this._daylightSavingAdjust(new Date(drawYear, drawMonth, 1)) > maxDraw) {
drawMonth--;
if (drawMonth < 0) {
drawMonth = 11;
drawYear--;
}
}
}
inst.drawMonth = drawMonth;
inst.drawYear = drawYear;
var prevText = this._get(inst, 'prevText');
prevText = (!navigationAsDateFormat ? prevText : this.formatDate(prevText,
this._daylightSavingAdjust(new Date(drawYear, drawMonth - stepMonths, 1)),
this._getFormatConfig(inst)));
var prev = (this._canAdjustMonth(inst, -1, drawYear, drawMonth) ?
'<a class="ui-datepicker-prev ui-corner-all" onclick="DP_jQuery.datepicker._adjustDate(\'#' + inst.id + '\', -' + stepMonths + ', \'M\');"' +
' title="' + prevText + '"><span class="ui-icon ui-icon-circle-triangle-' + ( isRTL ? 'e' : 'w') + '">' + prevText + '</span></a>' :
(hideIfNoPrevNext ? '' : '<a class="ui-datepicker-prev ui-corner-all ui-state-disabled" title="'+ prevText +'"><span class="ui-icon ui-icon-circle-triangle-' + ( isRTL ? 'e' : 'w') + '">' + prevText + '</span></a>'));
var nextText = this._get(inst, 'nextText');
nextText = (!navigationAsDateFormat ? nextText : this.formatDate(nextText,
this._daylightSavingAdjust(new Date(drawYear, drawMonth + stepMonths, 1)),
this._getFormatConfig(inst)));
var next = (this._canAdjustMonth(inst, +1, drawYear, drawMonth) ?
'<a class="ui-datepicker-next ui-corner-all" onclick="DP_jQuery.datepicker._adjustDate(\'#' + inst.id + '\', +' + stepMonths + ', \'M\');"' +
' title="' + nextText + '"><span class="ui-icon ui-icon-circle-triangle-' + ( isRTL ? 'w' : 'e') + '">' + nextText + '</span></a>' :
(hideIfNoPrevNext ? '' : '<a class="ui-datepicker-next ui-corner-all ui-state-disabled" title="'+ nextText + '"><span class="ui-icon ui-icon-circle-triangle-' + ( isRTL ? 'w' : 'e') + '">' + nextText + '</span></a>'));
var currentText = this._get(inst, 'currentText');
var gotoDate = (this._get(inst, 'gotoCurrent') && inst.currentDay ? currentDate : today);
currentText = (!navigationAsDateFormat ? currentText :
this.formatDate(currentText, gotoDate, this._getFormatConfig(inst)));
var controls = (!inst.inline ? '<button type="button" class="ui-datepicker-close ui-state-default ui-priority-primary ui-corner-all" onclick="DP_jQuery.datepicker._hideDatepicker();">' + this._get(inst, 'closeText') + '</button>' : '');
var buttonPanel = (showButtonPanel) ? '<div class="ui-datepicker-buttonpane ui-widget-content">' + (isRTL ? controls : '') +
(this._isInRange(inst, gotoDate) ? '<button type="button" class="ui-datepicker-current ui-state-default ui-priority-secondary ui-corner-all" onclick="DP_jQuery.datepicker._gotoToday(\'#' + inst.id + '\');"' +
'>' + currentText + '</button>' : '') + (isRTL ? '' : controls) + '</div>' : '';
var firstDay = parseInt(this._get(inst, 'firstDay'),10);
firstDay = (isNaN(firstDay) ? 0 : firstDay);
var dayNames = this._get(inst, 'dayNames');
var dayNamesShort = this._get(inst, 'dayNamesShort');
var dayNamesMin = this._get(inst, 'dayNamesMin');
var monthNames = this._get(inst, 'monthNames');
var monthNamesShort = this._get(inst, 'monthNamesShort');
var beforeShowDay = this._get(inst, 'beforeShowDay');
var showOtherMonths = this._get(inst, 'showOtherMonths');
var calculateWeek = this._get(inst, 'calculateWeek') || this.iso8601Week;
var endDate = inst.endDay ? this._daylightSavingAdjust(
new Date(inst.endYear, inst.endMonth, inst.endDay)) : currentDate;
var defaultDate = this._getDefaultDate(inst);
var html = '';
for (var row = 0; row < numMonths[0]; row++) {
var group = '';
for (var col = 0; col < numMonths[1]; col++) {
var selectedDate = this._daylightSavingAdjust(new Date(drawYear, drawMonth, inst.selectedDay));
var cornerClass = ' ui-corner-all';
var calender = '';
if (isMultiMonth) {
calender += '<div class="ui-datepicker-group ui-datepicker-group-';
switch (col) {
case 0: calender += 'first'; cornerClass = ' ui-corner-' + (isRTL ? 'right' : 'left'); break;
case numMonths[1]-1: calender += 'last'; cornerClass = ' ui-corner-' + (isRTL ? 'left' : 'right'); break;
default: calender += 'middle'; cornerClass = ''; break;
}
calender += '">';
}
calender += '<div class="ui-datepicker-header ui-widget-header ui-helper-clearfix' + cornerClass + '">' +
(/all|left/.test(cornerClass) && row == 0 ? (isRTL ? next : prev) : '') +
(/all|right/.test(cornerClass) && row == 0 ? (isRTL ? prev : next) : '') +
this._generateMonthYearHeader(inst, drawMonth, drawYear, minDate, maxDate,
selectedDate, row > 0 || col > 0, monthNames, monthNamesShort) + // draw month headers
'</div><table class="ui-datepicker-calendar"><thead>' +
'<tr>';
var thead = '';
for (var dow = 0; dow < 7; dow++) { // days of the week
var day = (dow + firstDay) % 7;
thead += '<th' + ((dow + firstDay + 6) % 7 >= 5 ? ' class="ui-datepicker-week-end"' : '') + '>' +
'<span title="' + dayNames[day] + '">' + dayNamesMin[day] + '</span></th>';
}
calender += thead + '</tr></thead><tbody>';
var daysInMonth = this._getDaysInMonth(drawYear, drawMonth);
if (drawYear == inst.selectedYear && drawMonth == inst.selectedMonth)
inst.selectedDay = Math.min(inst.selectedDay, daysInMonth);
var leadDays = (this._getFirstDayOfMonth(drawYear, drawMonth) - firstDay + 7) % 7;
var numRows = (isMultiMonth ? 6 : Math.ceil((leadDays + daysInMonth) / 7)); // calculate the number of rows to generate
var printDate = this._daylightSavingAdjust(new Date(drawYear, drawMonth, 1 - leadDays));
for (var dRow = 0; dRow < numRows; dRow++) { // create date picker rows
calender += '<tr>';
var tbody = '';
for (var dow = 0; dow < 7; dow++) { // create date picker days
var daySettings = (beforeShowDay ?
beforeShowDay.apply((inst.input ? inst.input[0] : null), [printDate]) : [true, '']);
var otherMonth = (printDate.getMonth() != drawMonth);
var unselectable = otherMonth || !daySettings[0] ||
(minDate && printDate < minDate) || (maxDate && printDate > maxDate);
tbody += '<td class="' +
((dow + firstDay + 6) % 7 >= 5 ? ' ui-datepicker-week-end' : '') + // highlight weekends
(otherMonth ? ' ui-datepicker-other-month' : '') + // highlight days from other months
((printDate.getTime() == selectedDate.getTime() && drawMonth == inst.selectedMonth && inst._keyEvent) || // user pressed key
(defaultDate.getTime() == printDate.getTime() && defaultDate.getTime() == selectedDate.getTime()) ?
// or defaultDate is current printedDate and defaultDate is selectedDate
' ' + this._dayOverClass : '') + // highlight selected day
(unselectable ? ' ' + this._unselectableClass + ' ui-state-disabled': '') + // highlight unselectable days
(otherMonth && !showOtherMonths ? '' : ' ' + daySettings[1] + // highlight custom dates
(printDate.getTime() >= currentDate.getTime() && printDate.getTime() <= endDate.getTime() ? // in current range
' ' + this._currentClass : '') + // highlight selected day
(printDate.getTime() == today.getTime() ? ' ui-datepicker-today' : '')) + '"' + // highlight today (if different)
((!otherMonth || showOtherMonths) && daySettings[2] ? ' title="' + daySettings[2] + '"' : '') + // cell title
(unselectable ? '' : ' onclick="DP_jQuery.datepicker._selectDay(\'#' +
inst.id + '\',' + drawMonth + ',' + drawYear + ', this);return false;"') + '>' + // actions
(otherMonth ? (showOtherMonths ? printDate.getDate() : ' ') : // display for other months
(unselectable ? '<span class="ui-state-default">' + printDate.getDate() + '</span>' : '<a class="ui-state-default' +
(printDate.getTime() == today.getTime() ? ' ui-state-highlight' : '') +
(printDate.getTime() >= currentDate.getTime() && printDate.getTime() <= endDate.getTime() ? // in current range
' ui-state-active' : '') + // highlight selected day
'" href="#">' + printDate.getDate() + '</a>')) + '</td>'; // display for this month
printDate.setDate(printDate.getDate() + 1);
printDate = this._daylightSavingAdjust(printDate);
}
calender += tbody + '</tr>';
}
drawMonth++;
if (drawMonth > 11) {
drawMonth = 0;
drawYear++;
}
calender += '</tbody></table>' + (isMultiMonth ? '</div>' +
((numMonths[0] > 0 && col == numMonths[1]-1) ? '<div class="ui-datepicker-row-break"></div>' : '') : '');
group += calender;
}
html += group;
}
html += buttonPanel + ($.browser.msie && parseInt($.browser.version,10) < 7 && !inst.inline ?
'<iframe src="javascript:false;" class="ui-datepicker-cover" frameborder="0"></iframe>' : '');
inst._keyEvent = false;
return html;
},
/* Generate the month and year header. */
// Builds the title area of one month panel: static text, or <select>
// drop-downs when the changeMonth/changeYear settings ask for them.
// "secondary" is true for every panel after the first in a multi-month
// display; secondary panels always get plain text.
_generateMonthYearHeader: function(inst, drawMonth, drawYear, minDate, maxDate,
selectedDate, secondary, monthNames, monthNamesShort) {
// During range selection an already-selected date earlier than minDate
// becomes the effective minimum.
minDate = (inst.rangeStart && minDate && selectedDate < minDate ? selectedDate : minDate);
var changeMonth = this._get(inst, 'changeMonth');
var changeYear = this._get(inst, 'changeYear');
var showMonthAfterYear = this._get(inst, 'showMonthAfterYear');
var html = '<div class="ui-datepicker-title">';
var monthHtml = '';
// month selection
if (secondary || !changeMonth)
monthHtml += '<span class="ui-datepicker-month">' + monthNames[drawMonth] + '</span> ';
else {
// Only offer months that fall inside min/max when the drawn year is a
// boundary year.
var inMinYear = (minDate && minDate.getFullYear() == drawYear);
var inMaxYear = (maxDate && maxDate.getFullYear() == drawYear);
monthHtml += '<select class="ui-datepicker-month" ' +
'onchange="DP_jQuery.datepicker._selectMonthYear(\'#' + inst.id + '\', this, \'M\');" ' +
'onclick="DP_jQuery.datepicker._clickMonthYear(\'#' + inst.id + '\');"' +
'>';
for (var month = 0; month < 12; month++) {
if ((!inMinYear || month >= minDate.getMonth()) &&
(!inMaxYear || month <= maxDate.getMonth()))
monthHtml += '<option value="' + month + '"' +
(month == drawMonth ? ' selected="selected"' : '') +
'>' + monthNamesShort[month] + '</option>';
}
monthHtml += '</select>';
}
if (!showMonthAfterYear)
html += monthHtml + ((secondary || changeMonth || changeYear) && (!(changeMonth && changeYear)) ? ' ' : '');
// year selection
if (secondary || !changeYear)
html += '<span class="ui-datepicker-year">' + drawYear + '</span>';
else {
// determine range of years to display
// yearRange is "from:to"; values starting with +/- are relative to the
// drawn year, otherwise they are absolute years.
var years = this._get(inst, 'yearRange').split(':');
var year = 0;
var endYear = 0;
if (years.length != 2) {
year = drawYear - 10;
endYear = drawYear + 10;
} else if (years[0].charAt(0) == '+' || years[0].charAt(0) == '-') {
year = drawYear + parseInt(years[0], 10);
endYear = drawYear + parseInt(years[1], 10);
} else {
year = parseInt(years[0], 10);
endYear = parseInt(years[1], 10);
}
// Clamp the year range to the configured min/max dates.
year = (minDate ? Math.max(year, minDate.getFullYear()) : year);
endYear = (maxDate ? Math.min(endYear, maxDate.getFullYear()) : endYear);
html += '<select class="ui-datepicker-year" ' +
'onchange="DP_jQuery.datepicker._selectMonthYear(\'#' + inst.id + '\', this, \'Y\');" ' +
'onclick="DP_jQuery.datepicker._clickMonthYear(\'#' + inst.id + '\');"' +
'>';
for (; year <= endYear; year++) {
html += '<option value="' + year + '"' +
(year == drawYear ? ' selected="selected"' : '') +
'>' + year + '</option>';
}
html += '</select>';
}
if (showMonthAfterYear)
html += (secondary || changeMonth || changeYear ? ' ' : '') + monthHtml;
html += '</div>'; // Close datepicker_header
return html;
},
/* Adjust one of the date sub-fields. */
// period is 'D', 'M' or 'Y'; offset may be negative.
_adjustInstDate: function(inst, offset, period) {
var year = inst.drawYear + (period == 'Y' ? offset : 0);
var month = inst.drawMonth + (period == 'M' ? offset : 0);
// Clamp the day first so e.g. Jan 31 + one month lands on the last day
// of February; the Date constructor then normalizes any month overflow.
var day = Math.min(inst.selectedDay, this._getDaysInMonth(year, month)) +
(period == 'D' ? offset : 0);
var date = this._daylightSavingAdjust(new Date(year, month, day));
// ensure it is within the bounds set
var minDate = this._getMinMaxDate(inst, 'min', true);
var maxDate = this._getMinMaxDate(inst, 'max');
date = (minDate && date < minDate ? minDate : date);
date = (maxDate && date > maxDate ? maxDate : date);
inst.selectedDay = date.getDate();
inst.drawMonth = inst.selectedMonth = date.getMonth();
inst.drawYear = inst.selectedYear = date.getFullYear();
// Month/year changes fire the onChangeMonthYear callback.
if (period == 'M' || period == 'Y')
this._notifyChange(inst);
},
/* Notify change of month/year. */
_notifyChange: function(inst) {
var onChange = this._get(inst, 'onChangeMonthYear');
if (onChange)
onChange.apply((inst.input ? inst.input[0] : null),
[inst.selectedYear, inst.selectedMonth + 1, inst]);
},
/* Determine the number of months to show. */
_getNumberOfMonths: function(inst) {
var numMonths = this._get(inst, 'numberOfMonths');
return (numMonths == null ? [1, 1] : (typeof numMonths == 'number' ? [1, numMonths] : numMonths));
},
/* Determine the current maximum date - ensure no time components are set - may be overridden for a range. */
// minMax is 'min' or 'max' (selects the minDate/maxDate setting).
// With checkRange set, an active range-selection start raises the minimum.
_getMinMaxDate: function(inst, minMax, checkRange) {
var date = this._determineDate(this._get(inst, minMax + 'Date'), null);
return (!checkRange || !inst.rangeStart ? date :
(!date || inst.rangeStart > date ? inst.rangeStart : date));
},
/* Find the number of days in a given month. */
_getDaysInMonth: function(year, month) {
return 32 - new Date(year, month, 32).getDate();
},
/* Find the day of the week of the first of a month. */
_getFirstDayOfMonth: function(year, month) {
return new Date(year, month, 1).getDay();
},
/* Determines if we should allow a "next/prev" month display change. */
// offset < 0 tests stepping back, otherwise stepping forward past the
// currently visible months.
_canAdjustMonth: function(inst, offset, curYear, curMonth) {
var numMonths = this._getNumberOfMonths(inst);
var date = this._daylightSavingAdjust(new Date(
curYear, curMonth + (offset < 0 ? offset : numMonths[1]), 1));
// Going back: test against the *last* day of the earlier month.
if (offset < 0)
date.setDate(this._getDaysInMonth(date.getFullYear(), date.getMonth()));
return this._isInRange(inst, date);
},
/* Is the given date in the accepted range? */
_isInRange: function(inst, date) {
// during range selection, use minimum of selected date and range start
var newMinDate = (!inst.rangeStart ? null : this._daylightSavingAdjust(
new Date(inst.selectedYear, inst.selectedMonth, inst.selectedDay)));
newMinDate = (newMinDate && inst.rangeStart < newMinDate ? inst.rangeStart : newMinDate);
// Fall back to the configured minDate when no range is in progress.
var minDate = newMinDate || this._getMinMaxDate(inst, 'min');
var maxDate = this._getMinMaxDate(inst, 'max');
// A missing bound means "unbounded" on that side.
return ((!minDate || date >= minDate) && (!maxDate || date <= maxDate));
},
/* Provide the configuration settings for formatting/parsing. */
_getFormatConfig: function(inst) {
var shortYearCutoff = this._get(inst, 'shortYearCutoff');
shortYearCutoff = (typeof shortYearCutoff != 'string' ? shortYearCutoff :
new Date().getFullYear() % 100 + parseInt(shortYearCutoff, 10));
return {shortYearCutoff: shortYearCutoff,
dayNamesShort: this._get(inst, 'dayNamesShort'), dayNames: this._get(inst, 'dayNames'),
monthNamesShort: this._get(inst, 'monthNamesShort'), monthNames: this._get(inst, 'monthNames')};
},
/* Format the given date for display. */
// "day" may be a Date object, a day number (with month/year), or falsy to
// format the instance's current selection.
_formatDate: function(inst, day, month, year) {
// No explicit date: commit the selection as the current date first.
if (!day) {
inst.currentDay = inst.selectedDay;
inst.currentMonth = inst.selectedMonth;
inst.currentYear = inst.selectedYear;
}
var date = (day ? (typeof day == 'object' ? day :
this._daylightSavingAdjust(new Date(year, month, day))) :
this._daylightSavingAdjust(new Date(inst.currentYear, inst.currentMonth, inst.currentDay)));
return this.formatDate(this._get(inst, 'dateFormat'), date, this._getFormatConfig(inst));
},
});
/* jQuery extend now ignores nulls! */
// Like $.extend, but null/undefined values in props are copied onto the
// target as well (they are meaningful here: they clear a setting).
function extendRemove(target, props) {
	$.extend(target, props);
	for (var name in props)
		// Loose '== null' matches both null and undefined, so the former
		// separate '== undefined' test was redundant and has been dropped.
		if (props[name] == null)
			target[name] = props[name];
	return target;
};
/* Determine whether an object is an array. */
// Pre-Array.isArray feature sniffing. Returns a truthy/falsy value, not a
// strict boolean. NOTE(review): in the regex, '\A' is just an escaped 'A'
// in JavaScript, so this matches 'Array()' inside the constructor source —
// presumably intentional; confirm before touching.
function isArray(a) {
return (a && (($.browser.safari && typeof a == 'object' && a.length) ||
(a.constructor && a.constructor.toString().match(/\Array\(\)/))));
};
/* Invoke the datepicker functionality.
@param options string - a command, optionally followed by additional parameters or
Object - settings for attaching new datepicker functionality
@return jQuery object */
$.fn.datepicker = function(options){
/* Initialise the date picker. */
// One-time global setup: external-click handler plus the shared picker div.
if (!$.datepicker.initialized) {
$(document).mousedown($.datepicker._checkExternalClick).
find('body').append($.datepicker.dpDiv);
$.datepicker.initialized = true;
}
var otherArgs = Array.prototype.slice.call(arguments, 1);
// 'isDisabled'/'getDate' return a value, so they dispatch on the first
// matched element only instead of iterating the whole set.
if (typeof options == 'string' && (options == 'isDisabled' || options == 'getDate'))
return $.datepicker['_' + options + 'Datepicker'].
apply($.datepicker, [this[0]].concat(otherArgs));
// Other string commands run per element; a non-string attaches a new picker.
return this.each(function() {
typeof options == 'string' ?
$.datepicker['_' + options + 'Datepicker'].
apply($.datepicker, [this].concat(otherArgs)) :
$.datepicker._attachDatepicker(this, options);
});
};
$.datepicker = new Datepicker(); // singleton instance
$.datepicker.initialized = false;
// Timestamp base — presumably used for generating unique ids; confirm usage.
$.datepicker.uuid = new Date().getTime();
$.datepicker.version = "1.7";
// Workaround for #4055
// Add another global to avoid noConflict issues with inline event handlers
window.DP_jQuery = $;
})(jQuery); | PypiClean |
/MeteorTools-2023.9.0-py3-none-any.whl/meteortools/utils/plotTrack.py |
import csv
from matplotlib import pyplot as plt
import numpy as np
from ..utils import greatCircleDistance
def trackToDistvsHeight(trackcsvfile):
    """
    Plot a distance vs height graph from the supplied CSV file

    Arguments:
        trackcsvfile: [str] full path to a CSV file containing columns of lat, long, height, time

    Returns:
        nothing, but it creates a PNG in the source folder containing the track plot
    """
    dists = []
    alts = []
    lat0 = -99  # sentinel: no reference point captured yet
    lng0 = 0
    # Use a context manager so the file handle is closed deterministically
    # (the previous version leaked the handle opened for csv.reader).
    with open(trackcsvfile) as f:
        for row in csv.reader(f):
            # columns are lat, long, height, times
            if row[0] == 'lats':  # skip the header row
                continue
            if lat0 == -99:
                # First data row: remember the start point; distance is zero there.
                lat0 = np.radians(float(row[0]))
                lng0 = np.radians(float(row[1]))
                dist = 0
            else:
                lat = np.radians(float(row[0]))
                lng = np.radians(float(row[1]))
                dist = greatCircleDistance(lat0, lng0, lat, lng)
            dists.append(dist)
            alts.append(float(row[2]) / 1000)  # height in km
    plt.clf()
    plt.plot(dists, alts)
    outname = trackcsvfile.replace('.csv', '_dist_alt.png')
    plt.savefig(outname)
    plt.close()
def trackToTimevsVelocity(trackcsvfile):
    """
    Plot a time vs velocity graph from the supplied CSV file

    Arguments:
        trackcsvfile: [str] full path to a CSV file containing columns of lat, long, height, time

    Returns:
        nothing, but it creates a PNG in the source folder containing the track plot
    """
    dists = []
    tims = []
    lat0 = -99  # sentinel: no reference point captured yet
    lng0 = 0
    # Use a context manager so the file handle is closed deterministically
    # (the previous version leaked the handle opened for csv.reader).
    with open(trackcsvfile) as f:
        for row in csv.reader(f):
            # columns are lat, long, height, times
            if row[0] == 'lats':  # skip the header row
                continue
            if lat0 == -99:
                lat0 = np.radians(float(row[0]))
                lng0 = np.radians(float(row[1]))
                dist = 0
            else:
                lat = np.radians(float(row[0]))
                lng = np.radians(float(row[1]))
                dist = greatCircleDistance(lat0, lng0, lat, lng)
            dists.append(dist)
            tims.append(float(row[3]))
    # Finite-difference velocities; the first sample has no predecessor so it
    # is reported as 0. (Stray debug print of each velocity removed.)
    # NOTE: duplicate timestamps would raise ZeroDivisionError, as before.
    vels = [0]
    for i in range(1, len(tims)):
        vels.append((dists[i] - dists[i - 1]) / (tims[i] - tims[i - 1]))
    plt.clf()
    plt.plot(tims, vels)
    outname = trackcsvfile.replace('.csv', '_tim_vel.png')
    plt.savefig(outname)
    plt.close()
def trackToTimevsHeight(trackcsvfile):
    """
    Plot a time vs height graph from the supplied CSV file

    Arguments:
        trackcsvfile: [str] full path to a CSV file containing columns of lat, long, height, time

    Returns:
        nothing, but it creates a PNG in the source folder containing the track plot
    """
    tims = []
    alts = []
    # Use a context manager so the file handle is closed deterministically
    # (the previous version leaked the handle opened for csv.reader).
    with open(trackcsvfile) as f:
        for row in csv.reader(f):
            # columns are lat, long, height, times
            if row[0] == 'lats':  # skip the header row
                continue
            tims.append(float(row[3]))
            alts.append(float(row[2]) / 1000)  # height in km
    plt.clf()
    plt.plot(tims, alts)
    outname = trackcsvfile.replace('.csv', '_time_alt.png')
    plt.savefig(outname)
    plt.close()
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/basic/lang/bg.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['bg']={"editor":"Редактор за форматиран текст","editorPanel":"Панел на текстовия редактор","common":{"editorHelp":"натиснете ALT+0 за помощ","browseServer":"Избор от сървъра","url":"URL адрес","protocol":"Протокол","upload":"Качване","uploadSubmit":"Изпращане към сървъра","image":"Изображение","flash":"Флаш","form":"Форма","checkbox":"Поле за избор","radio":"Радио бутон","textField":"Текстово поле","textarea":"Текстова зона","hiddenField":"Скрито поле","button":"Бутон","select":"Поле за избор","imageButton":"Бутон за изображение","notSet":"<не е избрано>","id":"ID","name":"Име","langDir":"Посока на езика","langDirLtr":"От ляво надясно (LTR)","langDirRtl":"От дясно наляво (RTL)","langCode":"Код на езика","longDescr":"Уеб адрес за дълго описание","cssClass":"Класове за CSS","advisoryTitle":"Заглавие","cssStyle":"Стил","ok":"ОК","cancel":"Отказ","close":"Затвори","preview":"Преглед","resize":"Влачете за да оразмерите","generalTab":"Общи","advancedTab":"Разширено","validateNumberFailed":"Тази стойност не е число","confirmNewPage":"Всички незапазени промени ще бъдат изгубени. Сигурни ли сте, че желаете да заредите нова страница?","confirmCancel":"Някои от опциите са променени. 
Сигурни ли сте, че желаете да затворите прозореца?","options":"Опции","target":"Цел","targetNew":"Нов прозорец (_blank)","targetTop":"Най-горният прозорец (_top)","targetSelf":"Текущият прозорец (_self)","targetParent":"Горният прозорец (_parent)","langDirLTR":"От ляво надясно (LTR)","langDirRTL":"От дясно наляво (RTL)","styles":"Стил","cssClasses":"Класове за CSS","width":"Ширина","height":"Височина","align":"Подравняване","left":"Ляво","right":"Дясно","center":"Център","justify":"Двустранно","alignLeft":"Подравни ляво","alignRight":"Подравни дясно","alignCenter":"Подравни център","alignTop":"Горе","alignMiddle":"По средата","alignBottom":"Долу","alignNone":"Без подравняване","invalidValue":"Невалидна стойност.","invalidHeight":"Височината трябва да е число.","invalidWidth":"Ширина трябва да е число.","invalidLength":"Стойността на полето \"%1\" трябва да е положително число с или без валидна мерна единица (%2).","invalidCssLength":"Стойността на полето \"%1\" трябва да е положително число с или без валидна CSS мерна единица (px, %, in, cm, mm, em, ex, pt, или pc).","invalidHtmlLength":"Стойността на полето \"%1\" трябва да е положително число с или без валидна HTML мерна единица (px или %).","invalidInlineStyle":"Стойността на стилa трябва да съдържат една или повече двойки във формат \"name : value\", разделени с двоеточие.","cssLengthTooltip":"Въведете числена стойност в пиксели или друга валидна CSS единица (px, %, in, cm, mm, em, ex, pt, или pc).","unavailable":"%1<span class=\"cke_accessibility\">, 
недостъпно</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Space","35":"End","36":"Home","46":"Delete","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Клавишна комбинация","optionDefault":"По подразбиране"},"about":{"copy":"Авторско право © $1. Всички права запазени.","dlgTitle":"Относно CKEditor 4","moreInfo":"За лицензионна информация моля посетете сайта ни:"},"basicstyles":{"bold":"Удебелен","italic":"Наклонен","strike":"Зачертан текст","subscript":"Долен индекс","superscript":"Горен индекс","underline":"Подчертан"},"notification":{"closed":"Известието е затворено."},"toolbar":{"toolbarCollapse":"Свиване на лентата с инструменти","toolbarExpand":"Разширяване на лентата с инструменти","toolbarGroups":{"document":"Документ","clipboard":"Клипборд/Отмяна","editing":"Редакция","forms":"Форми","basicstyles":"Базови стилове","paragraph":"Параграф","links":"Връзки","insert":"Вмъкване","styles":"Стилове","colors":"Цветове","tools":"Инструменти"},"toolbars":"Ленти с инструменти"},"clipboard":{"copy":"Копирай","copyError":"Настройките за сигурност на вашия бразуър не разрешават на редактора да изпълни действията по копиране. За целта използвайте клавиатурата (Ctrl+C).","cut":"Отрежи","cutError":"Настройките за сигурност на вашия браузър не позволяват на редактора автоматично да изъплни действията за отрязване. За целта използвайте клавиатурата (Ctrl+X).","paste":"Вмъкни","pasteNotification":"Натиснете %1 за да вмъкнете. 
Вашият браузър не поддържа поставяне с бутон от лентата с инструменти или от контекстното меню.","pasteArea":"Зона за поставяне","pasteMsg":"Поставете съдържанието в зоната отдолу и натиснете OK."},"indent":{"indent":"Увеличаване на отстъпа","outdent":"Намаляване на отстъпа"},"fakeobjects":{"anchor":"Кука","flash":"Флаш анимация","hiddenfield":"Скрито поле","iframe":"IFrame","unknown":"Неизвестен обект"},"link":{"acccessKey":"Клавиш за достъп","advanced":"Разширено","advisoryContentType":"Тип на съдържанието","advisoryTitle":"Заглавие","anchor":{"toolbar":"Котва","menu":"Промяна на котва","title":"Настройки на котва","name":"Име на котва","errorName":"Моля въведете име на котвата","remove":"Премахване на котва"},"anchorId":"По ID на елемент","anchorName":"По име на котва","charset":"Езиков код на свързания ресурс","cssClasses":"CSS класове","download":"Укажи изтегляне","displayText":"Текст за показване","emailAddress":"Имейл aдрес","emailBody":"Съдържание","emailSubject":"Тема","id":"Id","info":"Връзка","langCode":"Езиков код","langDir":"Посока на езика","langDirLTR":"От ляво надясно (LTR)","langDirRTL":"От дясно наляво (RTL)","menu":"Промяна на връзка","name":"Име","noAnchors":"(Няма котви в текущия документ)","noEmail":"Моля въведете имейл адрес","noUrl":"Моля въведете URL адрес","noTel":"Please type the phone number","other":"<друго>","phoneNumber":"Phone number","popupDependent":"Зависимост (Netscape)","popupFeatures":"Функции на изкачащ прозорец","popupFullScreen":"Цял екран (IE)","popupLeft":"Лява позиция","popupLocationBar":"Лента с локацията","popupMenuBar":"Лента за меню","popupResizable":"Оразмеряем","popupScrollBars":"Ленти за прелистване","popupStatusBar":"Статусна лента","popupToolbar":"Лента с инструменти","popupTop":"Горна позиция","rel":"Свързаност (rel атрибут)","selectAnchor":"Изберете котва","styles":"Стил","tabIndex":"Ред на достъп","target":"Цел","targetFrame":"<frame>","targetFrameName":"Име на целевия прозорец","targetPopup":"<изкачащ 
прозорец>","targetPopupName":"Име на изкачащ прозорец","title":"Връзка","toAnchor":"Връзка към котва в текста","toEmail":"Имейл","toUrl":"Уеб адрес","toPhone":"Phone","toolbar":"Връзка","type":"Тип на връзката","unlink":"Премахни връзката","upload":"Качване"},"list":{"bulletedlist":"Вмъкване/премахване на точков списък","numberedlist":"Вмъкване/премахване на номериран списък"},"undo":{"redo":"Пренаправи","undo":"Отмени"}}; | PypiClean |
/ORCID-Hub-4.16.6.tar.gz/ORCID-Hub-4.16.6/orcid_hub/models.py | """Application models."""
import copy
import csv
import json
import jsonschema
import os
import random
import re
import secrets
import string
import uuid
from collections import namedtuple
from datetime import datetime
from enum import IntFlag, IntEnum
from hashlib import md5
from io import StringIO
from itertools import groupby, zip_longest
from urllib.parse import urlencode
import validators
import yaml
from flask_login import UserMixin, current_user
from peewee import JOIN, BlobField
from peewee import BooleanField as BooleanField_
from peewee import (CharField, DateTimeField, DeferredRelation, Field, FixedCharField,
ForeignKeyField, IntegerField, Model, OperationalError, PostgresqlDatabase,
SmallIntegerField, TextField, fn)
from peewee_validates import ModelValidator
from playhouse.shortcuts import model_to_dict
from pycountry import countries
from pykwalify.core import Core
from pykwalify.errors import SchemaError
from . import app, db
from .schemas import affiliation_task_schema, researcher_url_task_schema, other_name_task_schema
# Flask environment name taken from the application config.
ENV = app.config["ENV"]
# Country used when a record does not specify one.
DEFAULT_COUNTRY = app.config["DEFAULT_COUNTRY"]
# Directory with the task schema files (../schemas relative to this package).
SCHEMA_DIR = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "schemas"))
# ORCID iD shape: four groups of four characters (digits or 'X'), dashes optional.
ORCID_ID_REGEX = re.compile(r"^([X\d]{4}-?){3}[X\d]{4}$")
# Loose partial-date matcher: digits with up to two '-' or '/'-separated parts.
PARTIAL_DATE_REGEX = re.compile(r"\d+([/\-]\d+){,2}")
# Accepted values for the affiliation-type column of an affiliation task.
AFFILIATION_TYPES = (
    "student",
    "education",
    "staff",
    "employment",
)
class ModelException(Exception):
    """Application model exception."""
    # The redundant 'pass' was removed: the docstring alone is a valid body.
class NestedDict(dict):
    """Helper for traversing a nested dictionaries."""

    def get(self, *keys, default=None):
        """Walk *keys* down the nested mapping, returning *default* on any miss.

        Each key descends one level; when a level is missing or is not a
        dict, *default* is returned instead of raising.
        """
        d = self
        for k in keys:
            if d is default:
                # A previous level already fell back to the default value.
                break
            if not isinstance(d, dict):
                return default
            # NOTE(review): super(NestedDict, d) requires d to be a NestedDict
            # instance — a plain nested dict would raise TypeError here.
            # Confirm that nested levels are always NestedDict as well.
            d = super(NestedDict, d).get(k, default)
        return d
def validate_orcid_id(value):
    """Validate ORCID iD (both format and the check-sum).

    Empty values are accepted silently; malformed iDs raise ValueError.
    """
    if not value:
        return
    if not ORCID_ID_REGEX.match(value):
        raise ValueError(
            f"Invalid ORCID iD {value}. It should be in the form of 'xxxx-xxxx-xxxx-xxxx' where x is a digit."
        )
    # Fold every digit (a trailing 'X' counts as 10) into a mod-11 running
    # total; a well-formed iD including its check digit leaves remainder 1.
    checksum = 0
    for ch in value:
        if ch == '-':
            continue
        checksum = (checksum * 2 + (10 if ch == 'X' else int(ch))) % 11
    if checksum != 1:
        raise ValueError(f"Invalid ORCID iD {value} checksum. Make sure you have entered correct ORCID iD.")
def lazy_property(fn):
    """Make a property lazy-evaluated.

    The wrapped method runs once per instance; its result is cached on the
    instance under a '_lazy_'-prefixed attribute and reused afterwards.
    """
    cache_name = '_lazy_' + fn.__name__

    @property
    def _lazy_property(self):
        try:
            return getattr(self, cache_name)
        except AttributeError:
            value = fn(self)
            setattr(self, cache_name, value)
            return value

    return _lazy_property
class PartialDate(namedtuple("PartialDate", ["year", "month", "day"])):
    """Partial date (without month day or both month and month day."""

    def as_orcid_dict(self):
        """Return ORCID dictionary representation of the partial date."""
        if self.year is None and self.month is None and self.day is None:
            return None
        # Each present part becomes {"value": "NNNN"} (zero-padded); missing
        # parts stay None.
        return dict(((f, None if v is None else {
            "value": ("%04d" if f == "year" else "%02d") % v
        }) for (f, v) in zip(self._fields, self)))

    @classmethod
    def create(cls, value):
        """Create a partial date form ORCID dictionary representation or string.

        >>> PartialDate.create({"year": {"value": "2003"}}).as_orcid_dict()
        {'year': {'value': '2003'}, 'month': None, 'day': None}
        >>> PartialDate.create({"year": {"value": "2003"}}).year
        2003
        >>> PartialDate.create("2003").year
        2003
        >>> PartialDate.create("2003-03")
        2003-03
        >>> PartialDate.create("2003-07-14")
        2003-07-14
        >>> PartialDate.create("2003/03")
        2003-03
        >>> PartialDate.create("2003/07/14")
        2003-07-14
        >>> PartialDate.create("03/2003")
        2003-03
        >>> PartialDate.create("14/07/2003")
        2003-07-14
        """
        if value is None or value == {}:
            return None
        if isinstance(value, str):
            match = PARTIAL_DATE_REGEX.search(value)
            if not match:
                raise ModelException(f"Wrong partial date value '{value}'")
            value0 = match[0]
            if '/' in value0:
                # Slash-separated dates may be day-first (e.g. 14/07/2003):
                # if the last part is a 4-digit year, reverse the parts.
                parts = value0.split('/')
                return cls(*[int(v) for v in (parts[::-1] if len(parts[-1]) > 2 else parts)])
            return cls(*[int(v) for v in value0.split('-')])
        # ORCID-style dict: {"year": {"value": "2003"}, ...}
        return cls(**{k: int(v.get("value")) if v else None for k, v in value.items()})

    def as_datetime(self):
        """Get 'datetime' data representation."""
        return datetime(self.year, self.month, self.day)

    def __str__(self):
        """Get string representation."""
        if self.year is None:
            return ''
        else:
            res = "%04d" % int(self.year)
            if self.month:
                res += "-%02d" % int(self.month)
            return res + "-%02d" % int(self.day) if self.day else res
# Make all three PartialDate fields optional (defaulting to None).
PartialDate.__new__.__defaults__ = (None, ) * len(PartialDate._fields)
class OrcidIdField(FixedCharField):
    """ORCID iD value DB field."""

    def __init__(self, *args, **kwargs):
        """Initialize ORCID iD data field."""
        # Supply the field defaults only when the caller has not set them.
        kwargs.setdefault("verbose_name", "ORCID iD")
        kwargs.setdefault("max_length", 19)
        super().__init__(*args, **kwargs)

    # TODO: figure out where to place the value validation...
    # def coerce(self, value):
    #     validate_orcid_id(value)
    #     return super().coerce(value)
class BooleanField(BooleanField_):
    """BooleanField extension to support inversion in queries."""

    def NOT(self):  # noqa: N802
        """Negate logical value in SQL."""
        return ~self
class PartialDateField(Field):
    """Partial date custom DB data field mapped to varchar(10)."""

    db_field = "varchar(10)"

    def db_value(self, value):
        """Convert into partial ISO date textual representation: YYYY-**-**, YYYY-MM-**, or YYYY-MM-DD."""
        if value is None or not value.year:
            return None
        year_part = "%04d" % int(value.year)
        if not value.month:
            # Month unknown: mask both month and day.
            return year_part + "-**-**"
        month_part = year_part + "-%02d" % int(value.month)
        if not value.day:
            return month_part + "-**"
        return month_part + "-%02d" % int(value.day)

    def python_value(self, value):
        """Parse partial ISO date textual representation."""
        if value is None:
            return None
        # Keep only numeric parts; '*' placeholders become missing fields.
        numbers = [int(part) for part in value.split("-") if "*" not in part]
        return PartialDate(**dict(zip_longest(("year", "month", "day"), numbers)))
class TaskType(IntEnum):
    """Enum used to represent Task type."""

    NONE = 0
    AFFILIATION = 4  # affiliation of employment/education
    FUNDING = 1  # funding
    WORK = 2
    PEER_REVIEW = 3
    RESEARCHER_URL = 5
    OTHER_NAME = 6
    SYNC = 11

    def __eq__(self, other):
        # Value comparison for enum members and plain ints; name comparison
        # for strings or name-bearing objects.
        if isinstance(other, TaskType):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return self.name == other or self.name == getattr(other, "name", None)

    def __hash__(self):
        # Keep hashing consistent with the name-based equality above.
        return hash(self.name)

    @classmethod
    def options(cls):
        """Get list of all types for UI dropdown option list."""
        return [(member, member.name.replace('_', ' ').title()) for member in cls]
class TaskTypeField(SmallIntegerField):
    """Task type DB field stored as a small integer.

    Accepts a ``TaskType`` member, a plain int, a numeric string, or a
    task-type name (case-insensitive). Uncoercible values are logged and
    stored as NULL instead of failing the whole DB operation.
    (The previous docstrings were copy-pasted from PartialDateField.)
    """

    def db_value(self, value):
        """Coerce *value* to the small-int representation of a TaskType."""
        if value is None:
            return None
        try:
            if isinstance(value, TaskType):
                return value.value
            elif isinstance(value, int):
                return value
            elif isinstance(value, str):
                if value.isdigit():
                    return int(value)
                return TaskType[value.upper()].value
            else:
                # Was ValueError("Unknow TaskType: '%s'", value): the '%s' was
                # never interpolated; use a properly formatted message.
                raise ValueError(f"Unknown TaskType: '{value}'")
        except Exception:  # narrowed from bare 'except:' so Ctrl-C propagates
            app.logger.exception("Failed to coerce the TaskType value, choosing NULL.")
            return None

    def python_value(self, value):
        """Map a stored small int back to a TaskType (None/unknown -> None)."""
        if value is None:
            return None
        try:
            return TaskType(value)
        except Exception:  # narrowed from bare 'except:'
            app.logger.exception(
                f"Failed to map DB value {value} to TaskType, choosing None.")
            return None
class Role(IntFlag):
"""
Enum used to represent user role.
The model provide multi role support representing role sets as bitmaps.
"""
NONE = 0 # NONE
SUPERUSER = 1 # SuperUser
ADMIN = 2 # Admin
RESEARCHER = 4 # Researcher
TECHNICAL = 8 # Technical contact
ANY = 255 # ANY
def __eq__(self, other):
if isinstance(other, Role):
return self.value == other.value
return (self.name == other or self.name == getattr(other, 'name', None))
def __hash__(self):
return hash(self.name)
class Affiliation(IntFlag):
"""
Enum used to represent user affiliation (type) to the organisation.
The model provide multiple affiliations support representing role sets as bitmaps.
"""
NONE = 0 # NONE
EDU = 1 # Education
EMP = 2 # Employment
def __eq__(self, other):
if isinstance(other, Affiliation):
return self.value == other.value
return (self.name == other or self.name == getattr(other, 'name', None))
def __hash__(self):
return hash(self.name)
def __str__(self):
return ", ".join({
self.EDU: "Education",
self.EMP: "Employment"
}[a] for a in Affiliation if a & self)
class BaseModel(Model):
    """Encapsulate common bits and pieces of the model classes."""

    def field_is_updated(self, field_name):
        """Test if field is 'dirty'."""
        return any(f.name == field_name for f in self.dirty_fields)

    @classmethod
    def get(cls, *query, **kwargs):
        """Get a single model instance."""
        if not kwargs:
            # Single scalar argument: treat it as the primary key value.
            if len(query) == 1 and isinstance(query[0], (int, str, )):
                return super().get(id=query[0])
            # No criteria at all: return the first row (or None).
            if not query:
                return super().select().limit(1).first()
        return super().get(*query, **kwargs)

    @classmethod
    def model_class_name(cls):
        """Get the class name of the model."""
        return cls._meta.name

    def __to_dashes(self, o):
        """Replace '_' with '-' in the dict keys."""
        if not isinstance(o, dict):
            return o
        return {k.replace('_', '-'): self.__to_dashes(v) for k, v in o.items()}

    def to_dict(self,
                to_dashes=False,
                exclude_nulls=False,
                recurse=True,
                backrefs=False,
                only=None,
                exclude=None,
                seen=None,
                extra_attrs=None,
                fields_from_query=None,
                max_depth=None):
        """Get dictionary representation of the model."""
        data = model_to_dict(
            self,
            recurse=recurse,
            backrefs=backrefs,
            only=only,
            exclude=exclude,
            seen=seen,
            extra_attrs=extra_attrs,
            fields_from_query=fields_from_query,
            max_depth=max_depth)
        if exclude_nulls:
            data = {k: v for (k, v) in data.items() if v is not None}
        # Normalise values that don't serialise cleanly: partial dates become
        # strings and the task type enum becomes its name.
        for key, value in data.items():
            if isinstance(value, PartialDate):
                data[key] = str(value)
            elif key == "task_type":
                data[key] = value.name
        return self.__to_dashes(data) if to_dashes else data

    def reload(self):
        """Refresh the object from the DB."""
        fresh = self.get(self._meta.primary_key == self._get_pk_value())
        for field_name in self._meta.fields.keys():
            setattr(self, field_name, getattr(fresh, field_name))
        # The instance now mirrors the DB row, so nothing is dirty any more.
        self._dirty.clear()

    class Meta:  # noqa: D101,D106
        database = db
        only_save_dirty = True
class ModelDeferredRelation(DeferredRelation):
    """Fixed DefferedRelation to allow inheritance and mixins."""

    def set_model(self, rel_model):
        """Include model in the generated "related_name" to make it unique."""
        for model, field, name in self.fields:
            # Only auto-name FK back-references that weren't named explicitly.
            if isinstance(field, ForeignKeyField) and not field._related_name:
                field._related_name = f"{model.model_class_name()}_{name}_set"
        super().set_model(rel_model)
# Deferred reference to the User model; resolved by DeferredUser.set_model(User)
# once the User class is declared (breaks the circular FK dependency).
DeferredUser = ModelDeferredRelation()
class AuditMixin(Model):
    """Mixin for getting data necessary for data change audit trail maintenance."""

    created_at = DateTimeField(default=datetime.utcnow)
    updated_at = DateTimeField(null=True, default=None)
    is_deleted = BooleanField(null=True, default=False)
    # created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
    # updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)

    def save(self, *args, **kwargs):  # noqa: D102
        # Bump updated_at on any real change - except when the only dirty field
        # is orcid_updated_at (a background sync stamp, not a user edit).
        if self.is_dirty() and self._dirty != {"orcid_updated_at"}:
            self.updated_at = datetime.utcnow()
            # Stamp the acting user: updated_by for existing rows, created_by
            # for rows that don't yet carry a creator.
            if current_user and hasattr(current_user, "id"):
                if hasattr(self, "created_by") and self.created_by and hasattr(self, "updated_by"):
                    self.updated_by_id = current_user.id
                elif hasattr(self, "created_by"):
                    self.created_by_id = current_user.id
        return super().save(*args, **kwargs)

    def delete_instance(self, *args, **kwargs):  # noqa: D102
        """Mark the entry is_deleted and save (with the link to the user
        that invoked the deletion) for audit trail.

        NOTE(review): super().delete_instance() still physically removes the
        row afterwards - confirm the soft-delete flag is intended to coexist
        with a hard delete.
        """
        self.is_deleted = True
        self.save()
        return super().delete_instance(*args, **kwargs)
class File(BaseModel):
    """Uploaded image files."""

    filename = CharField(max_length=100)
    data = BlobField()
    mimetype = CharField(max_length=30, db_column="mime_type")
    # Random 8-character URL-safe token used to address the file.
    token = FixedCharField(max_length=8, unique=True, default=lambda: secrets.token_urlsafe(8)[:8])

    class Meta:  # noqa: D101,D106
        table_alias = "f"
class Organisation(BaseModel, AuditMixin):
    """Research organisation."""

    # UI dropdown choices: (ISO 3166-1 alpha-2 code, country name) pairs
    # sorted by name, with a blank "Country" placeholder first.
    country_choices = [(c.alpha_2, c.name) for c in countries]
    country_choices.sort(key=lambda e: e[1])
    country_choices.insert(0, ("", "Country"))
    name = CharField(max_length=100, unique=True, null=True)
    tuakiri_name = CharField(max_length=80, unique=True, null=True)
    # Outside production the ORCID API credentials need not be unique, so
    # several test organisations can share sandbox credentials.
    if ENV != "prod":
        orcid_client_id = CharField(max_length=80, null=True)
        orcid_secret = CharField(max_length=80, null=True)
    else:  # pragma: no cover
        orcid_client_id = CharField(max_length=80, unique=True, null=True)
        orcid_secret = CharField(max_length=80, unique=True, null=True)
    confirmed = BooleanField(default=False)
    city = CharField(null=True)
    state = CharField(null=True, verbose_name="State/Region", max_length=100)
    country = CharField(null=True, choices=country_choices, default=DEFAULT_COUNTRY)
    disambiguated_id = CharField(null=True)
    disambiguation_source = CharField(null=True)
    is_email_sent = BooleanField(default=False)
    tech_contact = ForeignKeyField(
        DeferredUser,
        related_name="tech_contact_of",
        on_delete="SET NULL",
        null=True,
        help_text="Organisation technical contact")
    created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
    updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
    api_credentials_requested_at = DateTimeField(
        null=True,
        help_text="The time stamp when the user clicked on the button to register client API.")
    api_credentials_entered_at = DateTimeField(
        null=True, help_text="The time stamp when the user entered API Client ID and secret.")
    can_use_api = BooleanField(null=True, help_text="The organisation can access ORCID Hub API.")
    logo = ForeignKeyField(
        File, on_delete="CASCADE", null=True, help_text="The logo of the organisation")
    email_template = TextField(null=True)
    email_template_enabled = BooleanField(null=True, default=False)
    webhook_enabled = BooleanField(default=False, null=True)
    webhook_url = CharField(max_length=100, null=True)
    email_notifications_enabled = BooleanField(default=False, null=True)
    notification_email = CharField(max_length=100, null=True, verbose_name="Notification Email Address")

    @property
    def invitation_sent_at(self):
        """Get the timestamp of the most recent invitation sent to the technical contact."""
        row = self.orginvitation_set.select(
            fn.MAX(OrgInvitation.created_at).alias("last_sent_at")).where(
                OrgInvitation.invitee_id == self.tech_contact_id).first()
        if row:
            return row.last_sent_at
        # Implicitly returns None when no invitation exists.

    @property
    def invitation_confirmed_at(self):
        """Get the timestamp when the invitation link was opened."""
        row = self.orginvitation_set.select(
            fn.MAX(OrgInvitation.created_at).alias("last_confirmed_at")).where(
                OrgInvitation.invitee_id == self.tech_contact_id).where(
                    OrgInvitation.confirmed_at.is_null(False)).first()
        if row:
            return row.last_confirmed_at

    @property
    def users(self):
        """Get organisation's user query."""
        return User.select().join(
            UserOrg, on=(UserOrg.user_id == User.id)).where(UserOrg.org == self)

    @property
    def admins(self):
        """Get organisation's administrator query."""
        return self.users.where(UserOrg.is_admin)

    def __repr__(self):
        return self.name or self.tuakiri_name

    def save(self, *args, **kwargs):
        """Handle data consistency validation and saving."""
        if self.is_dirty():
            # Fall back to the Tuakiri-supplied name when no name was given.
            if self.name is None:
                self.name = self.tuakiri_name
            # Keep the TECHNICAL role in sync with the tech-contact assignment.
            if self.field_is_updated("tech_contact") and self.tech_contact:
                if not self.tech_contact.has_role(Role.TECHNICAL):
                    self.tech_contact.roles |= Role.TECHNICAL
                    self.tech_contact.save()
                    app.logger.info(f"Added TECHNICAL role to user {self.tech_contact}")
        # NOTE(review): the row count from super().save() is not returned here,
        # unlike the peewee convention - confirm no caller relies on it.
        super().save(*args, **kwargs)

    class Meta:  # noqa: D101,D106
        table_alias = "o"
class OrgInfo(BaseModel):
    """Preloaded organisation data."""

    name = CharField(max_length=100, unique=True, verbose_name="Organisation")
    tuakiri_name = CharField(max_length=100, unique=True, null=True, verbose_name="TUAKIRI Name")
    title = CharField(null=True, verbose_name="Contact Person Tile")
    first_name = CharField(null=True, verbose_name="Contact Person's First Name")
    last_name = CharField(null=True, verbose_name="Contact Person's Last Name")
    role = CharField(null=True, verbose_name="Contact Person's Role")
    email = CharField(null=True, verbose_name="Contact Person's Email Address")
    phone = CharField(null=True, verbose_name="Contact Person's Phone")
    is_public = BooleanField(
        null=True, default=False, help_text="Permission to post contact information to WEB")
    country = CharField(null=True, verbose_name="Country Code", default=DEFAULT_COUNTRY)
    city = CharField(null=True, verbose_name="City of Home Campus")
    disambiguated_id = CharField(
        null=True, verbose_name="common:disambiguated-organization-identifier")
    disambiguation_source = CharField(null=True, verbose_name="common:disambiguation-source")

    def __repr__(self):
        return self.name or self.disambiguated_id or super().__repr__()

    class Meta:  # noqa: D101,D106
        db_table = "org_info"
        table_alias = "oi"

    @classmethod
    def load_from_csv(cls, source):
        """Load data from CSV file or a string.

        Returns the number of processed data rows (header excluded).
        """
        if isinstance(source, str):
            source = StringIO(source)
        reader = csv.reader(source)
        header = next(reader)
        assert len(header) >= 3, \
            "Wrong number of fields. Expected at least 3 fields " \
            "(name, disambiguated organisation ID, and disambiguation source). " \
            "Read header: %s" % header
        # Positional regexes mapping free-form header titles onto columns.
        header_rexs = [
            re.compile(ex, re.I)
            for ex in ("organisation|name", "title", r"first\s*(name)?", r"last\s*(name)?", "role",
                       "email", "phone", "public|permission to post to web", r"country\s*(code)?",
                       "city", "(common:)?disambiguated.*identifier",
                       "(common:)?disambiguation.*source", r"tuakiri\s*(name)?")
        ]

        def index(rex):
            """Return first header column index matching the given regex."""
            for i, column in enumerate(header):
                if rex.match(column):
                    return i
            else:
                return None

        idxs = [index(rex) for rex in header_rexs]

        def val(row, i, default=None):
            # The default applies only to unmapped columns; empty cells yield None.
            if idxs[i] is None:
                return default
            else:
                v = row[idxs[i]].strip()
                return None if v == '' else v

        for row in reader:
            # skip empty lines:
            if not row or row is None or len(row) == 0 or (len(row) == 1 and row[0].strip() == ''):
                continue
            name = val(row, 0)
            # Upsert keyed on organisation name.
            oi, _ = cls.get_or_create(name=name)
            oi.title = val(row, 1)
            oi.first_name = val(row, 2)
            oi.last_name = val(row, 3)
            oi.role = val(row, 4)
            oi.email = val(row, 5)
            oi.phone = val(row, 6)
            # Only the literal "YES" (any case) counts as public consent.
            oi.is_public = val(row, 7) and val(row, 7).upper() == "YES"
            oi.country = val(row, 8) or DEFAULT_COUNTRY
            oi.city = val(row, 9)
            oi.disambiguated_id = val(row, 10)
            oi.disambiguation_source = val(row, 11)
            oi.tuakiri_name = val(row, 12)
            oi.save()
        return reader.line_num - 1
class User(BaseModel, UserMixin, AuditMixin):
    """
    ORCiD Hub user.

    It's a generic user including researchers, organisation administrators, hub administrators, etc.
    """

    name = CharField(max_length=64, null=True)
    first_name = CharField(null=True, verbose_name="First Name")
    last_name = CharField(null=True, verbose_name="Last Name")
    email = CharField(max_length=120, unique=True, null=True, verbose_name="Email Address")
    eppn = CharField(max_length=120, unique=True, null=True, verbose_name="EPPN")
    orcid = OrcidIdField(null=True, verbose_name="ORCID iD", help_text="User's ORCID iD")
    confirmed = BooleanField(default=False)
    # Role bit-map:
    roles = SmallIntegerField(default=0)
    is_locked = BooleanField(default=False)
    webhook_enabled = BooleanField(default=False, null=True)
    orcid_updated_at = DateTimeField(null=True, default=None)
    # TODO: many-to-many
    # NB! Deprecated!
    # TODO: we still need to remember the organisation that last authenticated the user
    organisation = ForeignKeyField(
        Organisation, related_name="members", on_delete="SET NULL", null=True)
    created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
    updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)

    def __repr__(self):
        if self.name and (self.eppn or self.email):
            return "%s (%s)" % (self.name, self.email or self.eppn)
        return self.name or self.email or self.orcid or super().__repr__()

    @property
    def username(self):
        """Username for complying with the Flask-Login API."""
        return self.orcid or self.email

    @property
    def organisations(self):
        """Get all linked to the user organisation query."""
        return (Organisation.select(
            Organisation, (Organisation.tech_contact_id == self.id).alias("is_tech_contact"),
            ((UserOrg.is_admin.is_null(False)) & (UserOrg.is_admin)).alias("is_admin")).join(
                UserOrg, on=((UserOrg.org_id == Organisation.id) & (UserOrg.user_id == self.id)))
            .naive())

    @lazy_property
    def org_links(self):
        """Get all user organisation linked directly and indirectly."""
        # Indirect links: other User rows sharing the same email or ORCID iD.
        if self.orcid:
            q = UserOrg.select().join(
                User,
                on=((User.id == UserOrg.user_id)
                    & ((User.email == self.email)
                       | (User.orcid == self.orcid)))).where((UserOrg.user_id == self.id)
                                                             | (User.email == self.email)
                                                             | (User.orcid == self.orcid))
        else:
            q = self.userorg_set
        return [
            r for r in q.select(UserOrg.id, UserOrg.org_id, Organisation.name.alias("org_name"))
            .join(Organisation, on=(
                Organisation.id == UserOrg.org_id)).order_by(Organisation.name).naive()
        ]

    @property
    def available_organisations(self):
        """Get all not yet linked to the user organisation query."""
        return (Organisation.select(Organisation).where(UserOrg.id.is_null()).join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.org_id == Organisation.id) & (UserOrg.user_id == self.id))))

    @property
    def admin_for(self):
        """Get organisations the user is admin for (query)."""
        return self.organisations.where(UserOrg.is_admin)

    @property
    def is_active(self):
        """Get 'is_active' based on confirmed for Flask-Login.

        TODO: confirmed - user that email is confirmed either by IdP or by confirmation email
        isn't the same as "is active".
        """
        return self.confirmed

    def has_role(self, role):
        """Return `True` if the user identifies with the specified role.

        :param role: A role name, `Role` instance, or integer value.
        """
        if isinstance(role, Role):
            return bool(role & Role(self.roles))
        elif isinstance(role, str):
            try:
                return bool(Role[role.upper()] & Role(self.roles))
            except Exception:
                # BUG FIX: was a bare `False` expression (the method implicitly
                # returned None); an unknown role name now explicitly reports False.
                return False
        elif type(role) is int:  # deliberately excludes bool (handled by `else`)
            return bool(role & self.roles)
        else:
            return False

    @property
    def is_superuser(self):
        """Test if the user is a HUB admin."""
        return bool(self.roles & Role.SUPERUSER)

    @is_superuser.setter
    def is_superuser(self, value):  # noqa: D401
        """Sets user as a HUB admin."""
        if value:
            self.roles |= Role.SUPERUSER.value
        else:
            self.roles &= ~Role.SUPERUSER.value

    @property
    def is_admin(self):
        """Test if the user belongs to the organisation admin."""
        return bool(self.roles & Role.ADMIN)

    def avatar(self, size=40, default="identicon"):
        """Return Gravatar service user avatar URL."""
        # TODO: default gravatar image
        # default = "https://www.example.com/default.jpg"
        # NOTE(review): assumes self.email is set - confirm callers never hit
        # this for email-less (ORCID-only) users.
        gravatar_url = "https://www.gravatar.com/avatar/" + md5(
            self.email.lower().encode()).hexdigest() + "?"
        gravatar_url += urlencode({'d': default, 's': str(size)})
        return gravatar_url

    @property
    def gravatar_profile_url(self):
        """Return Gravatar service user profile URL."""
        return "https://www.gravatar.com/" + md5(self.email.lower().encode()).hexdigest()

    @property
    def affiliations(self):
        """Return affiliations with the current organisation."""
        try:
            user_org = UserOrg.get(user=self, org=self.organisation)
            return Affiliation(user_org.affiliations)
        except UserOrg.DoesNotExist:
            return Affiliation.NONE

    def is_tech_contact_of(self, org=None):
        """Indicate if the user is the technical contact of the organisation."""
        if org is None:
            org = self.organisation
        return org and org.tech_contact and org.tech_contact_id == self.id

    def is_admin_of(self, org=None):
        """Indicate if the user is an administrator of the organisation."""
        if org is None:
            org = self.organisation
        return org and UserOrg.select().where(UserOrg.user == self, UserOrg.org == org, UserOrg.is_admin).exists()

    @property
    def uuid(self):
        """Generate UUID for the user based on the primary email."""
        return uuid.uuid5(uuid.NAMESPACE_URL, "mailto:" + (self.email or self.eppn))
# Resolve the deferred FK references declared before User existed.
DeferredUser.set_model(User)
class OrgInvitation(BaseModel, AuditMixin):
    """Organisation invitation to on-board the Hub."""

    invitee = ForeignKeyField(
        User, on_delete="CASCADE", null=True, related_name="received_org_invitations")
    inviter = ForeignKeyField(
        User, on_delete="SET NULL", null=True, related_name="sent_org_invitations")
    org = ForeignKeyField(Organisation, on_delete="SET NULL", verbose_name="Organisation")
    email = TextField(
        help_text="The email address the invitation was sent to.",
        verbose_name="Invitee Email Address")
    # Unique token embedded in the invitation link.
    token = TextField(unique=True)
    confirmed_at = DateTimeField(null=True)
    tech_contact = BooleanField(
        null=True,
        help_text="The invitee is the technical contact of the organisation.",
        verbose_name="Is Tech.contact")
    url = CharField(null=True)

    @property
    def sent_at(self):
        """Get the time the invitation was sent."""
        # Invitations are created at send time, so created_at doubles as sent_at.
        return self.created_at

    class Meta:  # noqa: D101,D106
        db_table = "org_invitation"
        table_alias = "oi"
class UserOrg(BaseModel, AuditMixin):
    """Linking object for many-to-many relationship."""

    user = ForeignKeyField(User, on_delete="CASCADE", index=True)
    org = ForeignKeyField(
        Organisation, on_delete="CASCADE", index=True, verbose_name="Organisation")
    is_admin = BooleanField(
        null=True, default=False, help_text="User is an administrator for the organisation")
    # Affiliation bit-map:
    affiliations = SmallIntegerField(default=0, null=True, verbose_name="EDU Person Affiliations")
    created_by = ForeignKeyField(
        User, on_delete="SET NULL", null=True, related_name="created_user_orgs")
    updated_by = ForeignKeyField(
        User, on_delete="SET NULL", null=True, related_name="updated_user_orgs")

    # TODO: the access token should be either here or in a separate list
    # access_token = CharField(max_length=120, unique=True, null=True)

    def save(self, *args, **kwargs):
        """Enforce foreign key constraints and consolidate user roles with the linked organisations
        before saving data.
        """
        if self.is_dirty():
            # if self.field_is_updated("org"):
            #     self.org  # just enforce re-querying
            user = self.user
            # Keep the user's global ADMIN role in sync: the role is held as
            # long as the user administers at least one organisation.
            if self.is_admin != user.is_admin:
                if self.is_admin or UserOrg.select().where((UserOrg.user_id == self.user_id) & (
                        UserOrg.org_id != self.org_id) & UserOrg.is_admin).exists():  # noqa: E125
                    user.roles |= Role.ADMIN
                    app.logger.info(f"Added ADMIN role to user {user}")
                else:
                    user.roles &= ~Role.ADMIN
                    app.logger.info(f"Revoked ADMIN role from user {user}")
                user.save()
        return super().save(*args, **kwargs)

    class Meta:  # noqa: D101,D106
        db_table = "user_org"
        table_alias = "uo"
        # One link per (user, org) pair.
        indexes = ((("user", "org"), True), )
class OrcidToken(BaseModel, AuditMixin):
    """For Keeping ORCID token in the table."""

    user = ForeignKeyField(
        User, null=True, index=True,
        on_delete="CASCADE")  # TODO: add validation for 3-legged authorization tokens
    org = ForeignKeyField(Organisation, index=True, verbose_name="Organisation")
    # Comma-separated list of OAuth scopes; see the `scopes` property below.
    scope = TextField(null=True)  # TODO implement property
    access_token = CharField(max_length=36, unique=True, null=True)
    issue_time = DateTimeField(default=datetime.utcnow)
    refresh_token = CharField(max_length=36, unique=True, null=True)
    # Token lifetime in seconds (as issued by ORCID).
    expires_in = IntegerField(default=0)
    created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
    updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)

    @property
    def scopes(self):  # noqa: D102
        """Return the scopes as a list (empty when no scope is stored)."""
        if self.scope:
            return self.scope.split(',')
        return []

    @scopes.setter
    def scopes(self, value):  # noqa: D102
        # Accept either a ready comma-separated string or an iterable of scopes.
        if isinstance(value, str):
            self.scope = value
        else:
            self.scope = ','.join(value)

    class Meta:  # noqa: D101,D106
        db_table = "orcid_token"
        table_alias = "ot"
class UserOrgAffiliation(BaseModel, AuditMixin):
    """For Keeping the information about the affiliation."""

    user = ForeignKeyField(User, on_delete="CASCADE")
    organisation = ForeignKeyField(
        Organisation, index=True, on_delete="CASCADE", verbose_name="Organisation")
    disambiguated_id = CharField(verbose_name="Disambiguation ORG Id", null=True)
    disambiguation_source = CharField(verbose_name="Disambiguation ORG Source", null=True)
    name = TextField(null=True, verbose_name="Institution/employer")
    start_date = PartialDateField(null=True)
    end_date = PartialDateField(null=True)
    department_name = TextField(null=True)
    department_city = TextField(null=True)
    role_title = TextField(null=True)
    # ORCID "put-code" of the record created/updated via the ORCID API.
    put_code = IntegerField(null=True)
    path = TextField(null=True)
    created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
    updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)

    class Meta:  # noqa: D101,D106
        db_table = "user_organisation_affiliation"
        table_alias = "oua"
class OrcidApiCall(BaseModel):
    """ORCID API call audit entry."""

    called_at = DateTimeField(default=datetime.utcnow)
    user = ForeignKeyField(User, null=True, on_delete="SET NULL")
    method = TextField()
    url = TextField()
    query_params = TextField(null=True)
    body = TextField(null=True)
    put_code = IntegerField(null=True)
    response = TextField(null=True)
    # Round-trip time of the call in milliseconds.
    response_time_ms = IntegerField(null=True)

    class Meta:  # noqa: D101,D106
        db_table = "orcid_api_call"
        table_alias = "oac"
class OrcidAuthorizeCall(BaseModel):
    """ORCID Authorize call audit entry."""

    called_at = DateTimeField(default=datetime.utcnow)
    user = ForeignKeyField(User, null=True, default=None, on_delete="SET NULL")
    method = TextField(null=True, default="GET")
    url = TextField(null=True)
    token = TextField(null=True)
    state = TextField(null=True)
    response_time_ms = IntegerField(null=True)

    class Meta:  # noqa: D101,D106
        db_table = "orcid_authorize_call"
        # NOTE(review): alias "oac" duplicates OrcidApiCall's - confirm the
        # two models are never joined in the same query.
        table_alias = "oac"
class Task(BaseModel, AuditMixin):
    """Batch processing task created from CSV/TSV file."""

    org = ForeignKeyField(
        Organisation, index=True, verbose_name="Organisation", on_delete="CASCADE")
    completed_at = DateTimeField(null=True)
    filename = TextField(null=True)
    created_by = ForeignKeyField(
        User, on_delete="SET NULL", null=True, related_name="created_tasks")
    updated_by = ForeignKeyField(
        User, on_delete="SET NULL", null=True, related_name="updated_tasks")
    task_type = TaskTypeField(default=TaskType.NONE)
    expires_at = DateTimeField(null=True)
    expiry_email_sent_at = DateTimeField(null=True)
    # NOTE(review): this DB field is shadowed by the `completed_count`
    # lazy_property defined further down the class body - confirm which of
    # the two is actually intended to win.
    completed_count = TextField(null=True, help_text="gives the status of uploaded task")

    def __repr__(self):
        return ("Synchronization task" if self.task_type == TaskType.SYNC else (
            self.filename
            or f"{TaskType(self.task_type).name.capitalize()} record processing task #{self.id}"))

    @property
    def is_expiry_email_sent(self):
        """Test if the expiry email is sent or not."""
        return bool(self.expiry_email_sent_at)

    @lazy_property
    def record_count(self):
        """Get count of the loaded records."""
        return 0 if self.records is None else self.records.count()

    @property
    def record_model(self):
        """Get record model class."""
        if self.records is not None:
            _, models = self.records.get_query_meta()
            # Exactly one model is expected in the record query.
            model, = models.keys()
            return model
        return None

    @lazy_property
    def records(self):
        """Get all task record query."""
        # SYNC and NONE tasks carry no record rows.
        if self.task_type in [TaskType.SYNC, TaskType.NONE]:
            return None
        # Resolves e.g. AFFILIATION -> self.affiliation_records (a back-ref set).
        return getattr(self, self.task_type.name.lower() + "_records")

    @lazy_property
    def completed_count(self):
        """Get number of completed rows."""
        return self.records.where(self.record_model.processed_at.is_null(False)).count()

    @lazy_property
    def completed_percent(self):
        """Get the percentage of completed rows."""
        return (100. * self.completed_count) / self.record_count if self.record_count else 0.

    @property
    def error_count(self):
        """Get error count encountered during processing batch task."""
        # NOTE(review): q/models/model below are computed but unused
        # (self.record_model is used instead) - dead code candidates.
        q = self.records
        _, models = q.get_query_meta()
        model, = models.keys()
        # `**` is peewee's case-insensitive LIKE (ILIKE).
        return self.records.where(self.record_model.status ** "%error%").count()

    # TODO: move this one to AffiliationRecord
    @classmethod
    def load_from_csv(cls, source, filename=None, org=None):
        """Load affiliation record data from CSV/TSV file or a string."""
        if isinstance(source, str):
            source = StringIO(source)
        reader = csv.reader(source)
        header = next(reader)
        if filename is None:
            # Default the filename to the source stream name or a timestamp.
            if hasattr(source, "name"):
                filename = source.name
            else:
                filename = datetime.utcnow().isoformat(timespec="seconds")
        # A single-column header containing TABs means the file is actually
        # TSV: rewind and re-read with a tab delimiter.
        if len(header) == 1 and '\t' in header[0]:
            source.seek(0)
            reader = csv.reader(source, delimiter='\t')
            header = next(reader)
        if len(header) < 2:
            raise ModelException("Expected CSV or TSV format file.")
        if len(header) < 4:
            raise ModelException(
                "Wrong number of fields. Expected at least 4 fields "
                "(first name, last name, email address or another unique identifier, student/staff). "
                f"Read header: {header}")
        # Positional regexes mapping free-form header titles onto record columns.
        header_rexs = [
            re.compile(ex, re.I)
            for ex in (r"first\s*(name)?", r"last\s*(name)?", "email", "organisation|^name",
                       "campus|department", "city", "state|region", "course|title|role",
                       r"start\s*(date)?", r"end\s*(date)?",
                       r"affiliation(s)?\s*(type)?|student|staff", "country", r"disambiguat.*id",
                       r"disambiguat.*source", r"put|code", "orcid.*", "external.*|.*identifier")
        ]

        def index(rex):
            """Return first header column index matching the given regex."""
            for i, column in enumerate(header):
                if rex.match(column):
                    return i
            else:
                return None

        idxs = [index(rex) for rex in header_rexs]
        if all(idx is None for idx in idxs):
            raise ModelException(f"Failed to map fields based on the header of the file: {header}")
        if org is None:
            org = current_user.organisation if current_user else None

        def val(row, i, default=None):
            """Get the stripped cell value for mapped column *i* (or the default)."""
            if idxs[i] is None or idxs[i] >= len(row):
                return default
            else:
                v = row[idxs[i]].strip()
                return default if v == '' else v

        # All records are created in one transaction; any error rolls back everything.
        with db.atomic():
            try:
                task = cls.create(org=org, filename=filename, task_type=TaskType.AFFILIATION)
                for row_no, row in enumerate(reader):
                    # skip empty lines:
                    if len(row) == 0:
                        continue
                    if len(row) == 1 and row[0].strip() == '':
                        continue
                    email = val(row, 2, "").lower()
                    orcid = val(row, 15)
                    external_id = val(row, 16)
                    if not email and not orcid and external_id and validators.email(external_id):
                        # if email is missing and external ID is given as a valid email, use it:
                        email = external_id
                    # The uploaded country must be from ISO 3166-1 alpha-2
                    country = val(row, 11)
                    if country:
                        try:
                            country = countries.lookup(country).alpha_2
                        except Exception:
                            raise ModelException(
                                f" (Country must be 2 character from ISO 3166-1 alpha-2) in the row "
                                f"#{row_no+2}: {row}. Header: {header}")
                    if not (email or orcid):
                        raise ModelException(
                            f"Missing user identifier (email address or ORCID iD) in the row "
                            f"#{row_no+2}: {row}. Header: {header}")
                    if orcid:
                        validate_orcid_id(orcid)
                    if not email or not validators.email(email):
                        raise ValueError(
                            f"Invalid email address '{email}' in the row #{row_no+2}: {row}")
                    affiliation_type = val(row, 10, "").lower()
                    if not affiliation_type or affiliation_type not in AFFILIATION_TYPES:
                        raise ValueError(
                            f"Invalid affiliation type '{affiliation_type}' in the row #{row_no+2}: {row}. "
                            f"Expected values: {', '.join(at for at in AFFILIATION_TYPES)}.")
                    first_name = val(row, 0)
                    last_name = val(row, 1)
                    if not(first_name and last_name):
                        raise ModelException(
                            "Wrong number of fields. Expected at least 4 fields "
                            "(first name, last name, email address or another unique identifier, "
                            f"student/staff): {row}")
                    af = AffiliationRecord(
                        task=task,
                        first_name=first_name,
                        last_name=last_name,
                        email=email,
                        organisation=val(row, 3),
                        department=val(row, 4),
                        city=val(row, 5),
                        region=val(row, 6),
                        role=val(row, 7),
                        start_date=PartialDate.create(val(row, 8)),
                        end_date=PartialDate.create(val(row, 9)),
                        affiliation_type=affiliation_type,
                        country=country,
                        disambiguated_id=val(row, 12),
                        disambiguation_source=val(row, 13),
                        put_code=val(row, 14),
                        orcid=orcid,
                        external_id=external_id)
                    validator = ModelValidator(af)
                    if not validator.validate():
                        raise ModelException(f"Invalid record: {validator.errors}")
                    af.save()
            except Exception:
                db.rollback()
                app.logger.exception("Failed to load affiliation file.")
                raise
        return task

    def to_dict(self, to_dashes=True, recurse=False, exclude=None, include_records=True):
        """Create a dict representation of the task suitable for serialization into JSON or YAML."""
        # TODO: expand for the other types of the tasks
        task_dict = super().to_dict(
            recurse=False if recurse is None else recurse,
            to_dashes=to_dashes,
            exclude=exclude,
            only=[Task.id, Task.filename, Task.task_type, Task.created_at, Task.updated_at])
        # TODO: refactor for funding task to get records here not in API or export
        if include_records and TaskType(self.task_type) != TaskType.FUNDING:
            task_dict["records"] = [
                r.to_dict(
                    to_dashes=to_dashes,
                    recurse=recurse,
                    exclude=[self.records.model_class._meta.fields["task"]]) for r in self.records
            ]
        return task_dict

    def to_export_dict(self):
        """Create a dictionary representation for export."""
        if self.task_type == TaskType.AFFILIATION:
            task_dict = self.to_dict()
        else:
            task_dict = self.to_dict(
                recurse=False,
                to_dashes=True,
                include_records=False,
                exclude=[Task.created_by, Task.updated_by, Task.org, Task.task_type])
            task_dict["task-type"] = self.task_type.name
            task_dict["records"] = [r.to_export_dict() for r in self.records]
        return task_dict

    class Meta:  # noqa: D101,D106
        table_alias = "t"
class Log(BaseModel):
    """Task log entries."""

    created_at = DateTimeField(default=datetime.utcnow)
    created_by = ForeignKeyField(
        User, on_delete="SET NULL", null=True, related_name="created_task_log_entries")
    task = ForeignKeyField(
        Task,
        on_delete="CASCADE",
        null=True,
        index=True,
        verbose_name="Task",
        related_name="log_entries")
    message = TextField(null=True)

    class Meta:  # noqa: D101,D106
        table_alias = "l"

    def save(self, *args, **kwargs):  # noqa: D102
        # Stamp the acting user (when one is logged in) before saving.
        if self.is_dirty():
            if current_user and hasattr(current_user, "id"):
                if hasattr(self, "created_by"):
                    self.created_by_id = current_user.id
        return super().save(*args, **kwargs)
class UserInvitation(BaseModel, AuditMixin):
    """User invitation to join the Hub and link a profile with the organisation."""

    invitee = ForeignKeyField(
        User, on_delete="CASCADE", null=True, related_name="received_user_invitations")
    inviter = ForeignKeyField(
        User, on_delete="SET NULL", null=True, related_name="sent_user_invitations")
    org = ForeignKeyField(
        Organisation, on_delete="CASCADE", null=True, verbose_name="Organisation")
    task = ForeignKeyField(Task, on_delete="CASCADE", null=True, index=True, verbose_name="Task")
    email = CharField(
        index=True, null=True, max_length=80,
        help_text="The email address the invitation was sent to.")
    first_name = TextField(null=True, verbose_name="First Name")
    last_name = TextField(null=True, verbose_name="Last Name")
    orcid = OrcidIdField(null=True)
    department = TextField(verbose_name="Campus/Department", null=True)
    organisation = TextField(verbose_name="Organisation Name", null=True)
    city = TextField(verbose_name="City", null=True)
    state = TextField(verbose_name="State", null=True)
    country = CharField(verbose_name="Country", max_length=2, null=True)
    course_or_role = TextField(verbose_name="Course or Job title", null=True)
    start_date = PartialDateField(verbose_name="Start date", null=True)
    end_date = PartialDateField(verbose_name="End date (leave blank if current)", null=True)
    # Affiliation bit-map (see the Affiliation enum).
    affiliations = SmallIntegerField(verbose_name="User affiliations", null=True)
    disambiguated_id = TextField(verbose_name="Disambiguation ORG Id", null=True)
    disambiguation_source = TextField(verbose_name="Disambiguation ORG Source", null=True)
    # Unique token embedded in the invitation link.
    token = TextField(unique=True)
    confirmed_at = DateTimeField(null=True)
    is_person_update_invite = BooleanField(default=False)

    @property
    def sent_at(self):
        """Get the time the invitation was sent."""
        # Invitations are created at send time, so created_at doubles as sent_at.
        return self.created_at

    class Meta:  # noqa: D101,D106
        db_table = "user_invitation"
class RecordModel(BaseModel):
    """Common model bits of the task records."""

    def save(self, *args, **kwargs):
        """Update related batch task when changing the record."""
        if self.is_dirty() and hasattr(self, "task"):
            # Touch the parent task so task listings reflect the latest change.
            self.task.updated_at = datetime.utcnow()
            self.task.save()
        return super().save(*args, **kwargs)

    def add_status_line(self, line):
        """Add a text line to the status for logging processing progress."""
        ts = datetime.utcnow().isoformat(timespec="seconds")
        self.status = (self.status + "\n" if self.status else '') + ts + ": " + line

    @classmethod
    def get_field_regxes(cls):
        """Return map of the model fields to compiled field-name regexes."""
        # Subclasses declare the (field, regex) pairs under either attribute
        # name — e.g. AffiliationRecord uses ``_regex_field_map`` — so accept
        # both spellings instead of failing with AttributeError on a mismatch.
        mapping = getattr(cls, "_field_regex_map", None)
        if mapping is None:
            mapping = cls._regex_field_map
        return {f: re.compile(e, re.I) for (f, e) in mapping}

    def to_export_dict(self):
        """Map the common record parts to dict for export into JSON/YAML.

        Only the parts the concrete record class actually defines are
        emitted; organisation details fall back to the task's organisation.
        """
        org = self.task.org
        d = {"type": self.type}
        if hasattr(self, "org_name"):
            d["organization"] = {
                "disambiguated-organization": {
                    "disambiguated-organization-identifier":
                    self.disambiguated_org_identifier or org.disambiguated_org_identifier,
                    "disambiguation-source":
                    self.disambiguation_source or org.disambiguation_source,
                },
                "name": self.org_name or org.name,
                "address": {
                    "city": self.city or org.city,
                    "region": self.region or org.state,
                    "country": self.country or org.country,
                },
            }
        if hasattr(self, "title"):
            d["title"] = {
                "title": {
                    "value": self.title,
                },
                "translated-title": {
                    "value": self.translated_title,
                    "language-code": self.translated_title_language_code,
                }
            }
        if hasattr(self, "invitees") and self.invitees:
            d["invitees"] = [r.to_export_dict() for r in self.invitees]
        if hasattr(self, "contributors") and self.contributors:
            d["contributors"] = {"contributor": [r.to_export_dict() for r in self.contributors]}
        if hasattr(self, "external_ids") and self.external_ids:
            d["external-ids"] = {"external-id": [r.to_export_dict() for r in self.external_ids]}
        if hasattr(self, "start_date") and self.start_date:
            d["start-date"] = self.start_date.as_orcid_dict()
        if hasattr(self, "end_date") and self.end_date:
            d["end-date"] = self.end_date.as_orcid_dict()
        return d
class GroupIdRecord(RecordModel):
    """GroupID records."""

    # Allowed group types, sorted by label, with an empty default option first.
    type_choices = [('publisher', 'publisher'), ('institution', 'institution'), ('journal', 'journal'),
                    ('conference', 'conference'), ('newspaper', 'newspaper'), ('newsletter', 'newsletter'),
                    ('magazine', 'magazine'), ('peer-review service', 'peer-review service')]
    type_choices.sort(key=lambda e: e[1])
    type_choices.insert(0, ("", ""))
    put_code = IntegerField(null=True)
    processed_at = DateTimeField(null=True)
    status = TextField(null=True, help_text="Record processing status.")
    name = CharField(
        max_length=120,
        help_text="The name of the group. This can be the name of a journal (Journal of Criminal Justice),"
        " a publisher (Society of Criminal Justice), or non-specific description (Legal Journal)"
        " as required.")
    group_id = CharField(
        max_length=120,
        help_text="The group's identifier, formatted as type:identifier, e.g. issn:12345678. "
        "This can be as specific (e.g. the journal's ISSN) or vague as required. "
        "Valid types include: issn, ringold, orcid-generated, fundref, publons.")
    description = CharField(
        max_length=1000,
        help_text="A brief textual description of the group. "
        "This can be as specific or vague as required.")
    type = CharField(
        max_length=80, choices=type_choices,
        help_text="One of the specified types: publisher; institution; journal; conference; newspaper; "
        "newsletter; magazine; peer-review service.")
    organisation = ForeignKeyField(
        Organisation, related_name="organisation", on_delete="CASCADE", null=True)

    class Meta:  # noqa: D101,D106
        db_table = "group_id_record"
        table_alias = "gid"
class AffiliationRecord(RecordModel):
    """Affiliation record loaded from CSV file for batch processing."""

    is_active = BooleanField(
        default=False, help_text="The record is marked 'active' for batch processing", null=True)
    task = ForeignKeyField(Task, related_name="affiliation_records", on_delete="CASCADE")
    put_code = IntegerField(null=True)
    external_id = CharField(
        max_length=100,
        null=True,
        verbose_name="External ID",
        help_text="Record identifier used in the data source system.")
    processed_at = DateTimeField(null=True)
    status = TextField(null=True, help_text="Record processing status.")
    first_name = CharField(max_length=120, null=True)
    last_name = CharField(max_length=120, null=True)
    email = CharField(max_length=80, null=True)
    orcid = OrcidIdField(null=True)
    organisation = CharField(null=True, index=True, max_length=200)
    affiliation_type = CharField(
        max_length=20, null=True, choices=[(v, v) for v in AFFILIATION_TYPES])
    role = CharField(null=True, verbose_name="Role/Course", max_length=100)
    department = CharField(null=True, max_length=200)
    start_date = PartialDateField(null=True)
    end_date = PartialDateField(null=True)
    city = CharField(null=True, max_length=200)
    state = CharField(null=True, verbose_name="State/Region", max_length=100)
    country = CharField(null=True, verbose_name="Country", max_length=2)
    disambiguated_id = CharField(
        null=True, max_length=20, verbose_name="Disambiguated Organization Identifier")
    disambiguation_source = CharField(
        null=True, max_length=100, verbose_name="Disambiguation Source")

    class Meta:  # noqa: D101,D106
        db_table = "affiliation_record"
        table_alias = "ar"

    # (field name, header regex) pairs consumed by RecordModel.get_field_regxes;
    # renamed from "_regex_field_map" to match the attribute that method reads.
    _field_regex_map = [
        ("first_name", r"first\s*(name)?"),
        ("last_name", r"last\s*(name)?"),
        ("email", "email"),
        ("organisation", "organisation|^name"),
        ("department", "campus|department"),
        ("city", "city"),
        ("state", "state|region"),
        ("role", "course|title|role"),
        ("start_date", r"start\s*(date)?"),
        ("end_date", r"end\s*(date)?"),
        ("affiliation_type", r"affiliation(s)?\s*(type)?|student|staff"),
        ("country", "country"),
        ("disambiguated_id", r"disambiguat.*id"),
        ("disambiguation_source", r"disambiguat.*source"),
        ("put_code", r"put|code"),
        ("orcid", "orcid.*"),
        ("external_id", "external.*|.*identifier"),
    ]

    @classmethod
    def load(cls, data, task=None, task_id=None, filename=None, override=True,
             skip_schema_validation=False, org=None):
        """Load affiliation record task from JSON/YAML.

        Data should be already deserialised unless a string is given, in
        which case it is parsed based on the file name extension.
        """
        if isinstance(data, str):
            # Guard against filename=None (previously raised AttributeError);
            # without a ".json" hint the payload is treated as YAML.
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input (and deprecated in PyYAML >= 5.1) — consider
            # yaml.safe_load; left unchanged to avoid breaking custom tags.
            data = (json.loads(data)
                    if filename and filename.lower().endswith(".json") else yaml.load(data))
        if org is None:
            org = current_user.organisation if current_user else None
        if not skip_schema_validation:
            jsonschema.validate(data, affiliation_task_schema)
        if not task and task_id:
            task = Task.select().where(Task.id == task_id).first()
        if not task and "id" in data:
            task_id = int(data["id"])
            task = Task.select().where(Task.id == task_id).first()
        with db.atomic():
            try:
                if not task:
                    filename = (filename or data.get("filename")
                                or datetime.utcnow().isoformat(timespec="seconds"))
                    task = Task.create(
                        org=org, filename=filename, task_type=TaskType.AFFILIATION)
                elif override:
                    AffiliationRecord.delete().where(AffiliationRecord.task == task).execute()
                record_fields = AffiliationRecord._meta.fields.keys()
                for r in data.get("records"):
                    if "id" in r and not override:
                        # Update an existing record referenced by its ID.
                        rec = AffiliationRecord.get(int(r["id"]))
                    else:
                        rec = AffiliationRecord(task=task)
                    for k, v in r.items():
                        if k == "id":
                            continue
                        k = k.replace('-', '_')
                        # Write into peewee's backing store directly so only
                        # genuinely changed fields get marked dirty.
                        if k in record_fields and rec._data.get(k) != v:
                            rec._data[k] = PartialDate.create(v) if k.endswith("date") else v
                            rec._dirty.add(k)
                    if rec.is_dirty():
                        rec.save()
            except Exception:
                # Was a bare "except:", which also traps SystemExit and
                # KeyboardInterrupt; narrowed while keeping the re-raise.
                db.rollback()
                app.logger.exception("Failed to load affiliation record task file.")
                raise
        return task
class FundingRecord(RecordModel):
    """Funding record loaded from JSON file for batch processing."""

    task = ForeignKeyField(Task, related_name="funding_records", on_delete="CASCADE")
    title = CharField(max_length=255)
    translated_title = CharField(null=True, max_length=255)
    translated_title_language_code = CharField(null=True, max_length=10)
    type = CharField(max_length=255)
    organization_defined_type = CharField(null=True, max_length=255)
    short_description = CharField(null=True, max_length=4000)
    amount = CharField(null=True, max_length=255)
    currency = CharField(null=True, max_length=3)
    start_date = PartialDateField(null=True)
    end_date = PartialDateField(null=True)
    org_name = CharField(null=True, max_length=255, verbose_name="Organisation Name")
    city = CharField(null=True, max_length=255)
    region = CharField(null=True, max_length=255)
    country = CharField(null=True, max_length=255)
    disambiguated_org_identifier = CharField(null=True, max_length=255)
    disambiguation_source = CharField(null=True, max_length=255)
    is_active = BooleanField(
        default=False, help_text="The record is marked for batch processing", null=True)
    processed_at = DateTimeField(null=True)
    status = TextField(null=True, help_text="Record processing status.")

    def to_export_dict(self):
        """Map the funding record to dict for export into JSON/YAML."""
        d = super().to_export_dict()
        # The ORCID message schema nests the amount with its currency code.
        d["amount"] = {
            "currency-code": self.currency,
            "value": self.amount,
        }
        return d

    @classmethod
    def load_from_csv(cls, source, filename=None, org=None):
        """Load data from CSV/TSV file or a string."""
        if isinstance(source, str):
            source = StringIO(source)
        if filename is None:
            filename = datetime.utcnow().isoformat(timespec="seconds")
        reader = csv.reader(source)
        header = next(reader)
        # Re-parse as TAB-separated if the single-column header hints at TSV.
        if len(header) == 1 and '\t' in header[0]:
            source.seek(0)
            reader = csv.reader(source, delimiter='\t')
            header = next(reader)
        if len(header) < 2:
            raise ModelException("Expected CSV or TSV format file.")
        # One regex per expected logical column; the positions used in val()
        # calls below are fixed by the order of this list.
        header_rexs = [
            re.compile(ex, re.I) for ex in [
                r"ext(ernal)?\s*id(entifier)?$", "title$", r"translated\s+(title)?",
                r"(translated)?\s*(title)?\s*language\s*(code)?", "type$",
                r"org(ani[sz]ation)?\s*(defined)?\s*type", r"(short\s*|description\s*)+$",
                "amount", "currency", r"start\s*(date)?", r"end\s*(date)?",
                r"(org(gani[zs]ation)?)?\s*name$", "city", "region|state", "country",
                r"disambiguated\s*(org(ani[zs]ation)?)?\s*id(entifier)?",
                r"disambiguation\s+source$", r"(is)?\s*active$", r"orcid\s*(id)?$", "name$",
                "role$", "email", r"(external)?\s*id(entifier)?\s+type$",
                r"((external)?\s*id(entifier)?\s+value|funding.*id)$",
                r"(external)?\s*id(entifier)?\s*url",
                r"(external)?\s*id(entifier)?\s*rel(ationship)?", "put.*code",
                r"(is)?\s*visib(bility|le)?", r"first\s*(name)?", r"(last|sur)\s*(name)?",
                "identifier", r"excluded?(\s+from(\s+profile)?)?"
            ]
        ]

        def index(rex):
            """Return first header column index matching the given regex."""
            for i, column in enumerate(header):
                if rex.match(column.strip()):
                    return i
            else:
                return None

        idxs = [index(rex) for rex in header_rexs]

        if all(idx is None for idx in idxs):
            raise ModelException(f"Failed to map fields based on the header of the file: {header}")
        if org is None:
            org = current_user.organisation if current_user else None

        def val(row, i, default=None):
            # Resolve logical column i through idxs; missing or blank -> default.
            if len(idxs) <= i or idxs[i] is None or idxs[i] >= len(row):
                return default
            else:
                v = row[idxs[i]].strip()
                return default if v == '' else v

        rows = []
        for row_no, row in enumerate(reader):
            # skip empty lines:
            if len(row) == 0:
                continue
            if len(row) == 1 and row[0].strip() == '':
                continue
            funding_type = val(row, 4)
            if not funding_type:
                raise ModelException(
                    f"Funding type is mandatory, #{row_no+2}: {row}. Header: {header}")

            # The uploaded country must be from ISO 3166-1 alpha-2
            country = val(row, 14)
            if country:
                try:
                    country = countries.lookup(country).alpha_2
                except Exception:
                    raise ModelException(
                        f" (Country must be 2 character from ISO 3166-1 alpha-2) in the row "
                        f"#{row_no+2}: {row}. Header: {header}")

            orcid, email = val(row, 18), val(row, 21, "").lower()
            if orcid:
                validate_orcid_id(orcid)
            if email and not validators.email(email):
                raise ValueError(
                    f"Invalid email address '{email}' in the row #{row_no+2}: {row}")

            external_id_type = val(row, 22)
            external_id_value = val(row, 23)
            # External ID type and value must be given together or both omitted.
            if bool(external_id_type) != bool(external_id_value):
                raise ModelException(
                    f"Invalid external ID the row #{row_no}. Type: {external_id_type}, Value: {external_id_value}")

            name, first_name, last_name = val(row, 19), val(row, 28), val(row, 29)
            if not name and first_name and last_name:
                name = first_name + ' ' + last_name

            # exclude the record from the profile
            excluded = val(row, 31)
            excluded = bool(excluded and excluded.lower() in ["y", "yes", "true", "1"])

            rows.append(
                dict(
                    excluded=excluded,
                    funding=dict(
                        # external_identifier = val(row, 0),
                        title=val(row, 1),
                        translated_title=val(row, 2),
                        translated_title_language_code=val(row, 3),
                        type=funding_type,
                        organization_defined_type=val(row, 5),
                        short_description=val(row, 6),
                        amount=val(row, 7),
                        currency=val(row, 8),
                        start_date=PartialDate.create(val(row, 9)),
                        end_date=PartialDate.create(val(row, 10)),
                        org_name=val(row, 11) or org.name,
                        city=val(row, 12) or org.city,
                        region=val(row, 13) or org.state,
                        country=country or org.country,
                        disambiguated_org_identifier=val(row, 15) or org.disambiguated_id,
                        disambiguation_source=val(row, 16) or org.disambiguation_source),
                    contributor=dict(
                        orcid=orcid,
                        name=name,
                        role=val(row, 20),
                        email=email,
                    ),
                    invitee=dict(
                        identifier=val(row, 30),
                        email=email,
                        first_name=val(row, 28),
                        last_name=val(row, 29),
                        orcid=orcid,
                        put_code=val(row, 26),
                        visibility=val(row, 27),
                    ),
                    external_id=dict(
                        type=external_id_type,
                        value=external_id_value,
                        url=val(row, 24),
                        relationship=val(row, 25))))

        with db.atomic():
            try:
                task = Task.create(org=org, filename=filename, task_type=TaskType.FUNDING)
                # NOTE(review): itertools.groupby only merges *consecutive* rows
                # with equal "funding" parts — this assumes rows of the same
                # funding are adjacent in the file; confirm with input format.
                for funding, records in groupby(rows, key=lambda row: row["funding"].items()):
                    records = list(records)
                    fr = cls(task=task, **dict(funding))
                    validator = ModelValidator(fr)
                    if not validator.validate():
                        raise ModelException(f"Invalid record: {validator.errors}")
                    fr.save()
                    # NOTE(review): contributors are created only for rows
                    # flagged "excluded", invitees only for non-excluded rows —
                    # looks intentional, but verify against the workflow.
                    for contributor in set(
                            tuple(r["contributor"].items()) for r in records
                            if r["excluded"]):
                        fc = FundingContributor(funding_record=fr, **dict(contributor))
                        validator = ModelValidator(fc)
                        if not validator.validate():
                            raise ModelException(f"Invalid contributor record: {validator.errors}")
                        fc.save()
                    for external_id in set(
                            tuple(r["external_id"].items()) for r in records
                            if r["external_id"]["type"] and r["external_id"]["value"]):
                        ei = ExternalId(funding_record=fr, **dict(external_id))
                        ei.save()
                    for invitee in set(
                            tuple(r["invitee"].items()) for r in records
                            if r["invitee"]["email"] and not r["excluded"]):
                        rec = FundingInvitee(funding_record=fr, **dict(invitee))
                        validator = ModelValidator(rec)
                        if not validator.validate():
                            raise ModelException(f"Invalid invitee record: {validator.errors}")
                        rec.save()
                return task
            except Exception:
                db.rollback()
                app.logger.exception("Failed to load funding file.")
                raise

    @classmethod
    def load_from_json(cls, source, filename=None, org=None, task=None):
        """Load data from JSON file or a string."""
        # import data from file based on its extension; either it is YAML or JSON
        data = load_yaml_json(filename=filename, source=source)
        records = data["records"] if isinstance(data, dict) else data
        for r in records:
            validation_source_data = copy.deepcopy(r)
            validation_source_data = del_none(validation_source_data)

            # Adding schema validation for funding
            validator = Core(
                source_data=validation_source_data,
                schema_files=[os.path.join(SCHEMA_DIR, "funding_schema.yaml")])
            validator.validate(raise_exception=True)

        with db.atomic():
            try:
                if org is None:
                    org = current_user.organisation if current_user else None
                if not task:
                    task = Task.create(org=org, filename=filename, task_type=TaskType.FUNDING)
                else:
                    FundingRecord.delete().where(FundingRecord.task == task).execute()
                for r in records:
                    # NOTE(review): the multi-key get() calls below assume the
                    # records returned by load_yaml_json support nested lookups
                    # (get("a", "b", default=...)) — a plain dict.get would fail.
                    title = r.get("title", "title", "value")
                    translated_title = r.get("title", "translated-title", "value")
                    translated_title_language_code = r.get("title", "translated-title",
                                                           "language-code")
                    type = r.get("type")
                    organization_defined_type = r.get("organization-defined-type", "value")
                    short_description = r.get("short-description")
                    amount = r.get("amount", "value")
                    currency = r.get("amount", "currency-code")
                    start_date = PartialDate.create(r.get("start-date"))
                    end_date = PartialDate.create(r.get("end-date"))
                    org_name = r.get("organization", "name")
                    city = r.get("organization", "address", "city")
                    region = r.get("organization", "address", "region")
                    country = r.get("organization", "address", "country")
                    disambiguated_org_identifier = r.get("organization",
                                                         "disambiguated-organization",
                                                         "disambiguated-organization-identifier")
                    disambiguation_source = r.get("organization", "disambiguated-organization",
                                                  "disambiguation-source")

                    funding_record = cls.create(
                        task=task,
                        title=title,
                        translated_title=translated_title,
                        translated_title_language_code=translated_title_language_code,
                        type=type,
                        organization_defined_type=organization_defined_type,
                        short_description=short_description,
                        amount=amount,
                        currency=currency,
                        org_name=org_name,
                        city=city,
                        region=region,
                        country=country,
                        disambiguated_org_identifier=disambiguated_org_identifier,
                        disambiguation_source=disambiguation_source,
                        start_date=start_date,
                        end_date=end_date)

                    invitees = r.get("invitees", default=[])
                    if invitees:
                        for invitee in invitees:
                            identifier = invitee.get("identifier")
                            email = invitee.get("email")
                            first_name = invitee.get("first-name")
                            last_name = invitee.get("last-name")
                            orcid_id = invitee.get("ORCID-iD")
                            put_code = invitee.get("put-code")
                            visibility = invitee.get("visibility")
                            FundingInvitee.create(
                                funding_record=funding_record,
                                identifier=identifier,
                                email=email.lower(),
                                first_name=first_name,
                                last_name=last_name,
                                orcid=orcid_id,
                                visibility=visibility,
                                put_code=put_code)
                    else:
                        raise SchemaError(u"Schema validation failed:\n - "
                                          u"Expecting Invitees for which the funding record will be written")

                    contributors = r.get("contributors", "contributor", default=[])
                    if contributors:
                        for contributor in contributors:
                            orcid_id = contributor.get("contributor-orcid", "path")
                            name = contributor.get("credit-name", "value")
                            email = contributor.get("contributor-email", "value")
                            role = contributor.get("contributor-attributes", "contributor-role")
                            FundingContributor.create(
                                funding_record=funding_record,
                                orcid=orcid_id,
                                name=name,
                                email=email,
                                role=role)

                    external_ids = r.get("external-ids", "external-id", default=[])
                    if external_ids:
                        for external_id in external_ids:
                            type = external_id.get("external-id-type")
                            value = external_id.get("external-id-value")
                            url = external_id.get("external-id-url", "value")
                            relationship = external_id.get("external-id-relationship")
                            ExternalId.create(
                                funding_record=funding_record,
                                type=type,
                                value=value,
                                url=url,
                                relationship=relationship)
                    else:
                        raise SchemaError(u"Schema validation failed:\n - An external identifier is required")
                return task
            except Exception:
                db.rollback()
                app.logger.exception("Failed to load funding file.")
                raise

    class Meta:  # noqa: D101,D106
        db_table = "funding_record"
        table_alias = "fr"
class PeerReviewRecord(RecordModel):
"""Peer Review record loaded from Json file for batch processing."""
task = ForeignKeyField(Task, related_name="peer_review_records", on_delete="CASCADE")
review_group_id = CharField(max_length=255)
reviewer_role = CharField(null=True, max_length=255)
review_url = CharField(null=True, max_length=255)
review_type = CharField(null=True, max_length=255)
review_completion_date = PartialDateField(null=True)
subject_external_id_type = CharField(null=True, max_length=255)
subject_external_id_value = CharField(null=True, max_length=255)
subject_external_id_url = CharField(null=True, max_length=255)
subject_external_id_relationship = CharField(null=True, max_length=255)
subject_container_name = CharField(null=True, max_length=255)
subject_type = CharField(null=True, max_length=80)
subject_name_title = CharField(null=True, max_length=255)
subject_name_subtitle = CharField(null=True, max_length=255)
subject_name_translated_title_lang_code = CharField(null=True, max_length=10)
subject_name_translated_title = CharField(null=True, max_length=255)
subject_url = CharField(null=True, max_length=255)
convening_org_name = CharField(null=True, max_length=255)
convening_org_city = CharField(null=True, max_length=255)
convening_org_region = CharField(null=True, max_length=255)
convening_org_country = CharField(null=True, max_length=255)
convening_org_disambiguated_identifier = CharField(null=True, max_length=255)
convening_org_disambiguation_source = CharField(null=True, max_length=255)
is_active = BooleanField(
default=False, help_text="The record is marked for batch processing", null=True)
processed_at = DateTimeField(null=True)
status = TextField(null=True, help_text="Record processing status.")
@classmethod
def load_from_csv(cls, source, filename=None, org=None):
"""Load data from CSV/TSV file or a string."""
if isinstance(source, str):
source = StringIO(source)
if filename is None:
filename = datetime.utcnow().isoformat(timespec="seconds")
reader = csv.reader(source)
header = next(reader)
if len(header) == 1 and '\t' in header[0]:
source.seek(0)
reader = csv.reader(source, delimiter='\t')
header = next(reader)
if len(header) < 2:
raise ModelException("Expected CSV or TSV format file.")
header_rexs = [
re.compile(ex, re.I) for ex in [
r"review\s*group\s*id(entifier)?$",
r"(reviewer)?\s*role$",
r"review\s*url$",
r"review\s*type$",
r"(review\s*completion)?.*date",
r"subject\s+external\s*id(entifier)?\s+type$",
r"subject\s+external\s*id(entifier)?\s+value$",
r"subject\s+external\s*id(entifier)?\s+url$",
r"subject\s+external\s*id(entifier)?\s+rel(ationship)?$",
r"subject\s+container\s+name$",
r"(subject)?\s*type$",
r"(subject)?\s*(name)?\s*title$",
r"(subject)?\s*(name)?\s*subtitle$",
r"(subject)?\s*(name)?\s*(translated)?\s*(title)?\s*lang(uage)?.*(code)?",
r"(subject)?\s*(name)?\s*translated\s*title$",
r"(subject)?\s*url$",
r"(convening)?\s*org(ani[zs]ation)?\s*name$",
r"(convening)?\s*org(ani[zs]ation)?\s*city",
r"(convening)?\s*org(ani[zs]ation)?\s*region$",
r"(convening)?\s*org(ani[zs]ation)?\s*country$",
r"(convening)?\s*(org(ani[zs]ation)?)?\s*disambiguated\s*id(entifier)?",
r"(convening)?\s*(org(ani[zs]ation)?)?\s*disambiguation\s*source$",
"email",
r"orcid\s*(id)?$",
"identifier",
r"first\s*(name)?",
r"(last|sur)\s*(name)?",
"put.*code",
r"(is)?\s*visib(ility|le)?",
r"(external)?\s*id(entifier)?\s+type$",
r"((external)?\s*id(entifier)?\s+value|peer\s*review.*id)$",
r"(external)?\s*id(entifier)?\s*url",
r"(external)?\s*id(entifier)?\s*rel(ationship)?",
r"(is)?\s*active$", ]]
def index(rex):
"""Return first header column index matching the given regex."""
for i, column in enumerate(header):
if rex.match(column.strip()):
return i
else:
return None
idxs = [index(rex) for rex in header_rexs]
if all(idx is None for idx in idxs):
raise ModelException(f"Failed to map fields based on the header of the file: {header}")
if org is None:
org = current_user.organisation if current_user else None
def val(row, i, default=None):
if len(idxs) <= i or idxs[i] is None or idxs[i] >= len(row):
return default
else:
v = row[idxs[i]].strip()
return default if v == '' else v
rows = []
for row_no, row in enumerate(reader):
# skip empty lines:
if len(row) == 0:
continue
if len(row) == 1 and row[0].strip() == '':
continue
review_group_id = val(row, 0)
if not review_group_id:
raise ModelException(
f"Review Group ID is mandatory, #{row_no+2}: {row}. Header: {header}")
convening_org_name = val(row, 16)
convening_org_city = val(row, 17)
convening_org_country = val(row, 19)
if not (convening_org_name and convening_org_city and convening_org_country):
raise ModelException(
f"Information about Convening Organisation (Name, City and Country) is mandatory, "
f"#{row_no+2}: {row}. Header: {header}")
# The uploaded country must be from ISO 3166-1 alpha-2
if convening_org_country:
try:
convening_org_country = countries.lookup(convening_org_country).alpha_2
except Exception:
raise ModelException(
f" (Convening Org Country must be 2 character from ISO 3166-1 alpha-2) in the row "
f"#{row_no+2}: {row}. Header: {header}")
orcid, email = val(row, 23), val(row, 22, "").lower()
if orcid:
validate_orcid_id(orcid)
if email and not validators.email(email):
raise ValueError(
f"Invalid email address '{email}' in the row #{row_no+2}: {row}")
external_id_type = val(row, 29)
external_id_value = val(row, 30)
if bool(external_id_type) != bool(external_id_value):
raise ModelException(
f"Invalid External ID the row #{row_no}.Type:{external_id_type},Peer Review Id:{external_id_value}")
review_completion_date = val(row, 4)
if review_completion_date:
review_completion_date = PartialDate.create(review_completion_date)
rows.append(
dict(
peer_review=dict(
review_group_id=review_group_id,
reviewer_role=val(row, 1),
review_url=val(row, 2),
review_type=val(row, 3),
review_completion_date=review_completion_date,
subject_external_id_type=val(row, 5),
subject_external_id_value=val(row, 6),
subject_external_id_url=val(row, 7),
subject_external_id_relationship=val(row, 8),
subject_container_name=val(row, 9),
subject_type=val(row, 10),
subject_name_title=val(row, 11),
subject_name_subtitle=val(row, 12),
subject_name_translated_title_lang_code=val(row, 13),
subject_name_translated_title=val(row, 14),
subject_url=val(row, 15),
convening_org_name=convening_org_name,
convening_org_city=convening_org_city,
convening_org_region=val(row, 18),
convening_org_country=convening_org_country,
convening_org_disambiguated_identifier=val(row, 20),
convening_org_disambiguation_source=val(row, 21),
),
invitee=dict(
email=email,
orcid=orcid,
identifier=val(row, 24),
first_name=val(row, 25),
last_name=val(row, 26),
put_code=val(row, 27),
visibility=val(row, 28),
),
external_id=dict(
type=external_id_type,
value=external_id_value,
url=val(row, 31),
relationship=val(row, 32))))
with db.atomic():
try:
task = Task.create(org=org, filename=filename, task_type=TaskType.PEER_REVIEW)
for peer_review, records in groupby(rows, key=lambda row: row["peer_review"].items()):
records = list(records)
prr = cls(task=task, **dict(peer_review))
validator = ModelValidator(prr)
if not validator.validate():
raise ModelException(f"Invalid record: {validator.errors}")
prr.save()
for external_id in set(tuple(r["external_id"].items()) for r in records if
r["external_id"]["type"] and r["external_id"]["value"]):
ei = PeerReviewExternalId(peer_review_record=prr, **dict(external_id))
ei.save()
for invitee in set(tuple(r["invitee"].items()) for r in records if r["invitee"]["email"]):
rec = PeerReviewInvitee(peer_review_record=prr, **dict(invitee))
validator = ModelValidator(rec)
if not validator.validate():
raise ModelException(f"Invalid invitee record: {validator.errors}")
rec.save()
return task
except Exception:
db.rollback()
app.logger.exception("Failed to load peer review file.")
raise
@classmethod
def load_from_json(cls, source, filename=None, org=None):
"""Load data from JSON file or a string."""
if isinstance(source, str):
# import data from file based on its extension; either it is YAML or JSON
peer_review_data_list = load_yaml_json(filename=filename, source=source)
for peer_review_data in peer_review_data_list:
validation_source_data = copy.deepcopy(peer_review_data)
validation_source_data = del_none(validation_source_data)
validator = Core(
source_data=validation_source_data,
schema_files=[os.path.join(SCHEMA_DIR, "peer_review_schema.yaml")])
validator.validate(raise_exception=True)
try:
if org is None:
org = current_user.organisation if current_user else None
task = Task.create(org=org, filename=filename, task_type=TaskType.PEER_REVIEW)
for peer_review_data in peer_review_data_list:
review_group_id = peer_review_data.get("review-group-id") if peer_review_data.get(
"review-group-id") else None
reviewer_role = peer_review_data.get("reviewer-role") if peer_review_data.get(
"reviewer-role") else None
review_url = peer_review_data.get("review-url").get("value") if peer_review_data.get(
"review-url") else None
review_type = peer_review_data.get("review-type") if peer_review_data.get("review-type") else None
review_completion_date = PartialDate.create(peer_review_data.get("review-completion-date"))
subject_external_id_type = peer_review_data.get("subject-external-identifier").get(
"external-id-type") if peer_review_data.get(
"subject-external-identifier") else None
subject_external_id_value = peer_review_data.get("subject-external-identifier").get(
"external-id-value") if peer_review_data.get(
"subject-external-identifier") else None
subject_external_id_url = peer_review_data.get("subject-external-identifier").get(
"external-id-url").get("value") if peer_review_data.get(
"subject-external-identifier") and peer_review_data.get("subject-external-identifier").get(
"external-id-url") else None
subject_external_id_relationship = peer_review_data.get("subject-external-identifier").get(
"external-id-relationship") if peer_review_data.get(
"subject-external-identifier") else None
subject_container_name = peer_review_data.get("subject-container-name").get(
"value") if peer_review_data.get(
"subject-container-name") else None
subject_type = peer_review_data.get("subject-type") if peer_review_data.get(
"subject-type") else None
subject_name_title = peer_review_data.get("subject-name").get("title").get(
"value") if peer_review_data.get(
"subject-name") and peer_review_data.get("subject-name").get("title") else None
subject_name_subtitle = peer_review_data.get("subject-name").get("subtitle").get(
"value") if peer_review_data.get(
"subject-name") and peer_review_data.get("subject-name").get("subtitle") else None
subject_name_translated_title_lang_code = peer_review_data.get("subject-name").get(
"translated-title").get(
"language-code") if peer_review_data.get(
"subject-name") and peer_review_data.get("subject-name").get("translated-title") else None
subject_name_translated_title = peer_review_data.get("subject-name").get(
"translated-title").get(
"value") if peer_review_data.get(
"subject-name") and peer_review_data.get("subject-name").get("translated-title") else None
subject_url = peer_review_data.get("subject-url").get("value") if peer_review_data.get(
"subject-name") else None
convening_org_name = peer_review_data.get("convening-organization").get(
"name") if peer_review_data.get(
"convening-organization") else None
convening_org_city = peer_review_data.get("convening-organization").get("address").get(
"city") if peer_review_data.get("convening-organization") and peer_review_data.get(
"convening-organization").get("address") else None
convening_org_region = peer_review_data.get("convening-organization").get("address").get(
"region") if peer_review_data.get("convening-organization") and peer_review_data.get(
"convening-organization").get("address") else None
convening_org_country = peer_review_data.get("convening-organization").get("address").get(
"country") if peer_review_data.get("convening-organization") and peer_review_data.get(
"convening-organization").get("address") else None
convening_org_disambiguated_identifier = peer_review_data.get(
"convening-organization").get("disambiguated-organization").get(
"disambiguated-organization-identifier") if peer_review_data.get(
"convening-organization") and peer_review_data.get("convening-organization").get(
"disambiguated-organization") else None
convening_org_disambiguation_source = peer_review_data.get(
"convening-organization").get("disambiguated-organization").get(
"disambiguation-source") if peer_review_data.get(
"convening-organization") and peer_review_data.get("convening-organization").get(
"disambiguated-organization") else None
peer_review_record = PeerReviewRecord.create(
task=task,
review_group_id=review_group_id,
reviewer_role=reviewer_role,
review_url=review_url,
review_type=review_type,
review_completion_date=review_completion_date,
subject_external_id_type=subject_external_id_type,
subject_external_id_value=subject_external_id_value,
subject_external_id_url=subject_external_id_url,
subject_external_id_relationship=subject_external_id_relationship,
subject_container_name=subject_container_name,
subject_type=subject_type,
subject_name_title=subject_name_title,
subject_name_subtitle=subject_name_subtitle,
subject_name_translated_title_lang_code=subject_name_translated_title_lang_code,
subject_name_translated_title=subject_name_translated_title,
subject_url=subject_url,
convening_org_name=convening_org_name,
convening_org_city=convening_org_city,
convening_org_region=convening_org_region,
convening_org_country=convening_org_country,
convening_org_disambiguated_identifier=convening_org_disambiguated_identifier,
convening_org_disambiguation_source=convening_org_disambiguation_source)
invitee_list = peer_review_data.get("invitees")
if invitee_list:
for invitee in invitee_list:
identifier = invitee.get("identifier") if invitee.get("identifier") else None
email = invitee.get("email") if invitee.get("email") else None
first_name = invitee.get("first-name") if invitee.get("first-name") else None
last_name = invitee.get("last-name") if invitee.get("last-name") else None
orcid_id = invitee.get("ORCID-iD") if invitee.get("ORCID-iD") else None
put_code = invitee.get("put-code") if invitee.get("put-code") else None
visibility = get_val(invitee, "visibility")
PeerReviewInvitee.create(
peer_review_record=peer_review_record,
identifier=identifier,
email=email.lower(),
first_name=first_name,
last_name=last_name,
orcid=orcid_id,
visibility=visibility,
put_code=put_code)
else:
raise SchemaError(u"Schema validation failed:\n - "
u"Expecting Invitees for which the peer review record will be written")
external_ids_list = peer_review_data.get("review-identifiers").get("external-id") if \
peer_review_data.get("review-identifiers") else None
if external_ids_list:
for external_id in external_ids_list:
type = external_id.get("external-id-type")
value = external_id.get("external-id-value")
url = external_id.get("external-id-url").get("value") if \
external_id.get("external-id-url") else None
relationship = external_id.get("external-id-relationship")
PeerReviewExternalId.create(
peer_review_record=peer_review_record,
type=type,
value=value,
url=url,
relationship=relationship)
else:
raise SchemaError(u"Schema validation failed:\n - An external identifier is required")
return task
except Exception:
db.rollback()
app.logger.exception("Failed to load peer review file.")
raise
    class Meta:  # noqa: D101,D106
        # Peewee metadata: physical table name and SQL alias used in queries.
        db_table = "peer_review_record"
        table_alias = "pr"
class ResearcherUrlRecord(RecordModel):
    """Researcher URL record loaded from CSV/TSV or JSON/YAML for batch processing."""

    task = ForeignKeyField(Task, related_name="researcher_url_records", on_delete="CASCADE")
    url_name = CharField(max_length=255)
    url_value = CharField(max_length=255)
    display_index = IntegerField(null=True)
    email = CharField(max_length=120)
    first_name = CharField(max_length=120)
    last_name = CharField(max_length=120)
    orcid = OrcidIdField(null=True)
    put_code = IntegerField(null=True)
    visibility = CharField(null=True, max_length=100)
    is_active = BooleanField(
        default=False, help_text="The record is marked for batch processing", null=True)
    processed_at = DateTimeField(null=True)
    status = TextField(null=True, help_text="Record processing status.")

    @classmethod
    def load_from_csv(cls, source, filename=None, org=None):
        """Load data from CSV/TSV file or a string.

        Creates a Task and one record per row inside a single transaction;
        any invalid row rolls the whole batch back.

        :param source: file-like object or a CSV/TSV string.
        :param filename: task file name (defaults to the source name or a timestamp).
        :param org: owning organisation (defaults to the current user's one).
        :return: the created :class:`Task`.
        """
        if isinstance(source, str):
            source = StringIO(source)
        if filename is None:
            if hasattr(source, "name"):
                filename = source.name
            else:
                filename = datetime.utcnow().isoformat(timespec="seconds")
        reader = csv.reader(source)
        header = next(reader)
        # A single-column header containing TABs means the file is TSV:
        if len(header) == 1 and '\t' in header[0]:
            source.seek(0)
            reader = csv.reader(source, delimiter='\t')
            header = next(reader)
        if len(header) < 2:
            raise ModelException("Expected CSV or TSV format file.")
        if len(header) < 5:
            raise ModelException(
                "Wrong number of fields. Expected at least 5 fields "
                "(first name, last name, email address or another unique identifier, url name, url value). "
                f"Read header: {header}")
        # Column mapping is fuzzy: each regex locates one logical field in the header.
        header_rexs = [
            re.compile(ex, re.I) for ex in (r"(url)?.*name", r"(url)?.*value", r"(display)?.*index",
                                            "email", r"first\s*(name)?", r"(last|sur)\s*(name)?",
                                            "orcid.*", r"put|code", r"(is)?\s*visib(bility|le)?")]

        def index(rex):
            """Return first header column index matching the given regex."""
            for i, column in enumerate(header):
                if rex.match(column.strip()):
                    return i
            else:
                return None

        idxs = [index(rex) for rex in header_rexs]
        if all(idx is None for idx in idxs):
            raise ModelException(f"Failed to map fields based on the header of the file: {header}")
        if org is None:
            org = current_user.organisation if current_user else None

        def val(row, i, default=None):
            """Return the stripped cell for logical field *i*, or *default* when absent/empty."""
            if len(idxs) <= i or idxs[i] is None or idxs[i] >= len(row):
                return default
            else:
                v = row[idxs[i]].strip()
                return default if v == '' else v

        with db.atomic():
            try:
                task = Task.create(org=org, filename=filename, task_type=TaskType.RESEARCHER_URL)
                for row_no, row in enumerate(reader):
                    # skip empty lines:
                    if len(row) == 0:
                        continue
                    if len(row) == 1 and row[0].strip() == '':
                        continue
                    email = val(row, 3, "").lower()
                    orcid = val(row, 6)
                    if not (email or orcid):
                        raise ModelException(
                            f"Missing user identifier (email address or ORCID iD) in the row "
                            f"#{row_no+2}: {row}. Header: {header}")
                    if orcid:
                        validate_orcid_id(orcid)
                    # NOTE(review): a valid email is required even when an ORCID iD is
                    # given, despite the "or another unique identifier" wording — confirm.
                    if not email or not validators.email(email):
                        raise ValueError(
                            f"Invalid email address '{email}' in the row #{row_no+2}: {row}")
                    url_name = val(row, 0, "")
                    url_value = val(row, 1, "")
                    first_name = val(row, 4)
                    last_name = val(row, 5)
                    if not (url_name and url_value and first_name and last_name):
                        raise ModelException(
                            "Wrong number of fields. Expected at least 5 fields (url name, url value, first name, "
                            f"last name, email address or another unique identifier): {row}")
                    rr = cls(
                        task=task,
                        url_name=url_name,
                        url_value=url_value,
                        display_index=val(row, 2),
                        email=email,
                        first_name=first_name,
                        last_name=last_name,
                        orcid=orcid,
                        put_code=val(row, 7),
                        visibility=val(row, 8))
                    validator = ModelValidator(rr)
                    if not validator.validate():
                        raise ModelException(f"Invalid record: {validator.errors}")
                    rr.save()
            except Exception:
                db.rollback()
                app.logger.exception("Failed to load Researcher Url Record file.")
                raise
        return task

    @classmethod
    def load_from_json(cls, source, filename=None, org=None, task=None, skip_schema_validation=False):
        """Load data from JSON file or a string.

        :param skip_schema_validation: bypass jsonschema validation of the payload.
        :return: the created (or supplied) :class:`Task`.
        """
        data = load_yaml_json(filename=filename, source=source)
        if not skip_schema_validation:
            jsonschema.validate(data, researcher_url_task_schema)
        records = data["records"] if isinstance(data, dict) else data
        with db.atomic():
            try:
                if org is None:
                    org = current_user.organisation if current_user else None
                if not task:
                    task = Task.create(org=org, filename=filename, task_type=TaskType.RESEARCHER_URL)
                for r in records:
                    url_name = r.get("url-name")
                    # FIX: r.get("url", "value") returned the literal string "value" when
                    # "url" was absent and the whole nested dict when present; drill into
                    # the ORCID-style {"url": {"value": ...}} wrapper instead.
                    url_value = get_val(r, "url", "value") or r.get("url-value")
                    display_index = r.get("display-index")
                    email = r.get("email")
                    first_name = r.get("first-name")
                    last_name = r.get("last-name")
                    orcid_id = r.get("ORCID-iD") or r.get("orcid")
                    put_code = r.get("put-code")
                    visibility = r.get("visibility")
                    cls.create(
                        task=task,
                        url_name=url_name,
                        url_value=url_value,
                        display_index=display_index,
                        # FIX: guard against a missing email (record identified by ORCID iD
                        # only) — .lower() on None raised AttributeError.
                        email=email.lower() if email else None,
                        first_name=first_name,
                        last_name=last_name,
                        orcid=orcid_id,
                        visibility=visibility,
                        put_code=put_code)
                return task
            except Exception:
                db.rollback()
                app.logger.exception("Failed to load Researcher Url file.")
                raise

    class Meta:  # noqa: D101,D106
        db_table = "researcher_url_record"
        table_alias = "ru"
class OtherNameRecord(RecordModel):
    """Other Name record loaded from CSV/TSV or JSON/YAML for batch processing."""

    task = ForeignKeyField(Task, related_name="other_name_records", on_delete="CASCADE")
    content = CharField(max_length=255)
    display_index = IntegerField(null=True)
    email = CharField(max_length=120)
    first_name = CharField(max_length=120)
    last_name = CharField(max_length=120)
    orcid = OrcidIdField(null=True)
    put_code = IntegerField(null=True)
    visibility = CharField(null=True, max_length=100)
    is_active = BooleanField(
        default=False, help_text="The record is marked for batch processing", null=True)
    processed_at = DateTimeField(null=True)
    status = TextField(null=True, help_text="Record processing status.")

    @classmethod
    def load_from_csv(cls, source, filename=None, org=None):
        """Load other names data from CSV/TSV file or a string.

        Creates a Task and one record per row inside a single transaction;
        any invalid row rolls the whole batch back.

        :return: the created :class:`Task`.
        """
        if isinstance(source, str):
            source = StringIO(source)
        if filename is None:
            if hasattr(source, "name"):
                filename = source.name
            else:
                filename = datetime.utcnow().isoformat(timespec="seconds")
        reader = csv.reader(source)
        header = next(reader)
        # A single-column header containing TABs means the file is TSV:
        if len(header) == 1 and '\t' in header[0]:
            source.seek(0)
            reader = csv.reader(source, delimiter='\t')
            header = next(reader)
        if len(header) < 2:
            raise ModelException("Expected CSV or TSV format file.")
        if len(header) < 4:
            raise ModelException(
                "Wrong number of fields. Expected at least 4 fields (first name, last name, email address "
                f"or another unique identifier, content). Read header: {header}")
        # Column mapping is fuzzy: each regex locates one logical field in the header.
        header_rexs = [
            re.compile(ex, re.I) for ex in ("content", r"(display)?.*index", "email", r"first\s*(name)?",
                                            r"(last|sur)\s*(name)?", "orcid.*", r"put|code",
                                            r"(is)?\s*visib(bility|le)?")]

        def index(rex):
            """Return first header column index matching the given regex."""
            for i, column in enumerate(header):
                if rex.match(column.strip()):
                    return i
            else:
                return None

        idxs = [index(rex) for rex in header_rexs]
        if all(idx is None for idx in idxs):
            raise ModelException(f"Failed to map fields based on the header of the file: {header}")
        if org is None:
            org = current_user.organisation if current_user else None

        def val(row, i, default=None):
            """Return the stripped cell for logical field *i*, or *default* when absent/empty."""
            if len(idxs) <= i or idxs[i] is None or idxs[i] >= len(row):
                return default
            else:
                v = row[idxs[i]].strip()
                return default if v == '' else v

        with db.atomic():
            try:
                task = Task.create(org=org, filename=filename, task_type=TaskType.OTHER_NAME)
                for row_no, row in enumerate(reader):
                    # skip empty lines:
                    if len(row) == 0:
                        continue
                    if len(row) == 1 and row[0].strip() == '':
                        continue
                    email = val(row, 2, "").lower()
                    orcid = val(row, 5)
                    if not (email or orcid):
                        raise ModelException(
                            f"Missing user identifier (email address or ORCID iD) in the row "
                            f"#{row_no+2}: {row}. Header: {header}")
                    if orcid:
                        validate_orcid_id(orcid)
                    # NOTE(review): a valid email is required even when an ORCID iD is
                    # given, despite the "or another unique identifier" wording — confirm.
                    if not email or not validators.email(email):
                        raise ValueError(
                            f"Invalid email address '{email}' in the row #{row_no+2}: {row}")
                    content = val(row, 0, "")
                    first_name = val(row, 3)
                    last_name = val(row, 4)
                    if not (content and first_name and last_name):
                        raise ModelException(
                            "Wrong number of fields. Expected at least 4 fields (content, first name, last name, "
                            f"email address or another unique identifier): {row}")
                    ot = cls(
                        task=task,
                        content=content,
                        display_index=val(row, 1),
                        email=email,
                        first_name=first_name,
                        last_name=last_name,
                        orcid=orcid,
                        put_code=val(row, 6),
                        visibility=val(row, 7))
                    validator = ModelValidator(ot)
                    if not validator.validate():
                        raise ModelException(f"Invalid record: {validator.errors}")
                    ot.save()
            except Exception:
                db.rollback()
                # FIX: the message previously said "Researcher Url Record" (copy-paste):
                app.logger.exception("Failed to load Other Name Record file.")
                raise
        return task

    @classmethod
    def load_from_json(cls, source, filename=None, org=None, task=None, skip_schema_validation=False):
        """Load data from JSON file or a string.

        :param skip_schema_validation: bypass jsonschema validation of the payload.
        :return: the created (or supplied) :class:`Task`.
        """
        data = load_yaml_json(filename=filename, source=source)
        if not skip_schema_validation:
            jsonschema.validate(data, other_name_task_schema)
        records = data["records"] if isinstance(data, dict) else data
        with db.atomic():
            try:
                if org is None:
                    org = current_user.organisation if current_user else None
                if not task:
                    task = Task.create(org=org, filename=filename, task_type=TaskType.OTHER_NAME)
                for r in records:
                    content = r.get("content")
                    display_index = r.get("display-index")
                    email = r.get("email")
                    first_name = r.get("first-name")
                    last_name = r.get("last-name")
                    orcid_id = r.get("ORCID-iD") or r.get("orcid")
                    put_code = r.get("put-code")
                    visibility = r.get("visibility")
                    cls.create(
                        task=task,
                        content=content,
                        display_index=display_index,
                        # FIX: guard against a missing email (record identified by ORCID iD
                        # only) — .lower() on None raised AttributeError.
                        email=email.lower() if email else None,
                        first_name=first_name,
                        last_name=last_name,
                        orcid=orcid_id,
                        visibility=visibility,
                        put_code=put_code)
                return task
            except Exception:
                db.rollback()
                app.logger.exception("Failed to load Other Name Record file.")
                raise

    class Meta:  # noqa: D101,D106
        db_table = "other_name_record"
        table_alias = "onr"
class WorkRecord(RecordModel):
    """Work record loaded from CSV/TSV or JSON/YAML for batch processing."""

    task = ForeignKeyField(Task, related_name="work_records", on_delete="CASCADE")
    title = CharField(max_length=255)
    sub_title = CharField(null=True, max_length=255)
    translated_title = CharField(null=True, max_length=255)
    translated_title_language_code = CharField(null=True, max_length=10)
    journal_title = CharField(null=True, max_length=255)
    short_description = CharField(null=True, max_length=4000)
    citation_type = CharField(max_length=255)
    citation_value = CharField(max_length=255)
    type = CharField(null=True, max_length=255)
    publication_date = PartialDateField(null=True)
    publication_media_type = CharField(null=True, max_length=255)
    url = CharField(null=True, max_length=255)
    language_code = CharField(null=True, max_length=10)
    country = CharField(null=True, max_length=255)
    is_active = BooleanField(
        default=False, help_text="The record is marked for batch processing", null=True)
    processed_at = DateTimeField(null=True)
    status = TextField(null=True, help_text="Record processing status.")

    @classmethod
    def load_from_csv(cls, source, filename=None, org=None):
        """Load work records from a CSV/TSV file or a string.

        Rows are grouped by identical work fields; each group becomes one
        WorkRecord with its contributors, external IDs and invitees.

        :return: the created :class:`Task`.
        """
        if isinstance(source, str):
            source = StringIO(source)
        if filename is None:
            filename = datetime.utcnow().isoformat(timespec="seconds")
        reader = csv.reader(source)
        header = next(reader)
        # A single-column header containing TABs means the file is TSV:
        if len(header) == 1 and '\t' in header[0]:
            source.seek(0)
            reader = csv.reader(source, delimiter='\t')
            header = next(reader)
        if len(header) < 2:
            raise ModelException("Expected CSV or TSV format file.")
        # Column mapping is fuzzy: each regex locates one logical field in the header.
        header_rexs = [
            re.compile(ex, re.I) for ex in [
                r"ext(ernal)?\s*id(entifier)?$",
                "title$",
                r"sub.*(title)?$",
                r"translated\s+(title)?",
                r"(translated)?\s*(title)?\s*language\s*(code)?",
                r"journal",
                "type$",
                r"(short\s*|description\s*)+$",
                r"citat(ion)?.*type",
                r"citat(ion)?.*value",
                r"(publication)?.*date",
                r"(publ(ication?))?.*media.*(type)?",
                r"url",
                r"lang(uage)?.*(code)?",
                r"country",
                r"(is)?\s*active$",
                r"orcid\s*(id)?$",
                "name$",
                "role$",
                "email",
                r"(external)?\s*id(entifier)?\s+type$",
                r"((external)?\s*id(entifier)?\s+value|work.*id)$",
                r"(external)?\s*id(entifier)?\s*url",
                r"(external)?\s*id(entifier)?\s*rel(ationship)?",
                "put.*code",
                r"(is)?\s*visib(bility|le)?",
                r"first\s*(name)?",
                r"(last|sur)\s*(name)?",
                "identifier",
                r"excluded?(\s+from(\s+profile)?)?"
            ]
        ]

        def index(rex):
            """Return first header column index matching the given regex."""
            for i, column in enumerate(header):
                if rex.match(column.strip()):
                    return i
            else:
                return None

        idxs = [index(rex) for rex in header_rexs]
        if all(idx is None for idx in idxs):
            raise ModelException(f"Failed to map fields based on the header of the file: {header}")
        if org is None:
            org = current_user.organisation if current_user else None

        def val(row, i, default=None):
            """Return the stripped cell for logical field *i*, or *default* when absent/empty."""
            if len(idxs) <= i or idxs[i] is None or idxs[i] >= len(row):
                return default
            else:
                v = row[idxs[i]].strip()
                return default if v == '' else v

        rows = []
        for row_no, row in enumerate(reader):
            # skip empty lines:
            if len(row) == 0:
                continue
            if len(row) == 1 and row[0].strip() == '':
                continue
            work_type = val(row, 6)
            if not work_type:
                # FIX: the message previously said "Funding type" (copy-paste from
                # the funding loader):
                raise ModelException(
                    f"Work type is mandatory, #{row_no+2}: {row}. Header: {header}")
            # The uploaded country must be from ISO 3166-1 alpha-2
            country = val(row, 14)
            if country:
                try:
                    country = countries.lookup(country).alpha_2
                except Exception:
                    raise ModelException(
                        f" (Country must be 2 character from ISO 3166-1 alpha-2) in the row "
                        f"#{row_no+2}: {row}. Header: {header}")
            orcid, email = val(row, 16), val(row, 19, "").lower()
            if orcid:
                validate_orcid_id(orcid)
            if email and not validators.email(email):
                raise ValueError(
                    f"Invalid email address '{email}' in the row #{row_no+2}: {row}")
            external_id_type = val(row, 20)
            external_id_value = val(row, 21)
            # Type and value must be given together:
            if bool(external_id_type) != bool(external_id_value):
                # FIX: row number reported consistently with the other messages (+2
                # accounts for the header line and 1-based numbering):
                raise ModelException(
                    f"Invalid external ID the row #{row_no+2}. Type: {external_id_type}, Value: {external_id_value}")
            name, first_name, last_name = val(row, 17), val(row, 26), val(row, 27)
            if not name and first_name and last_name:
                name = first_name + ' ' + last_name
            # exclude the record from the profile
            excluded = val(row, 29)
            excluded = bool(excluded and excluded.lower() in ["y", "yes", "true", "1"])
            publication_date = val(row, 10)
            if publication_date:
                publication_date = PartialDate.create(publication_date)
            rows.append(
                dict(
                    excluded=excluded,
                    work=dict(
                        # external_identifier = val(row, 0),
                        title=val(row, 1),
                        sub_title=val(row, 2),
                        translated_title=val(row, 3),
                        translated_title_language_code=val(row, 4),
                        journal_title=val(row, 5),
                        type=work_type,
                        short_description=val(row, 7),
                        citation_type=val(row, 8),
                        citation_value=val(row, 9),
                        publication_date=publication_date,
                        publication_media_type=val(row, 11),
                        url=val(row, 12),
                        language_code=val(row, 13),
                        # FIX: store the normalised ISO 3166-1 alpha-2 code computed
                        # above instead of re-reading the raw cell value:
                        country=country,
                        is_active=False,
                    ),
                    contributor=dict(
                        orcid=orcid,
                        name=name,
                        role=val(row, 18),
                        email=email,
                    ),
                    invitee=dict(
                        identifier=val(row, 28),
                        email=email,
                        first_name=first_name,
                        last_name=last_name,
                        orcid=orcid,
                        put_code=val(row, 24),
                        visibility=val(row, 25),
                    ),
                    external_id=dict(
                        type=external_id_type,
                        value=external_id_value,
                        url=val(row, 22),
                        relationship=val(row, 23))))
        with db.atomic():
            try:
                task = Task.create(org=org, filename=filename, task_type=TaskType.WORK)
                # Rows with identical work fields collapse into one record:
                for work, records in groupby(rows, key=lambda row: row["work"].items()):
                    records = list(records)
                    wr = cls(task=task, **dict(work))
                    validator = ModelValidator(wr)
                    if not validator.validate():
                        raise ModelException(f"Invalid record: {validator.errors}")
                    wr.save()
                    # NOTE(review): contributors are created only for rows marked
                    # "excluded" (users kept out of the invitation flow) — confirm
                    # this asymmetry with the invitees below is intentional.
                    for contributor in set(
                            tuple(r["contributor"].items()) for r in records
                            if r["excluded"]):
                        fc = WorkContributor(work_record=wr, **dict(contributor))
                        validator = ModelValidator(fc)
                        if not validator.validate():
                            raise ModelException(f"Invalid contributor record: {validator.errors}")
                        fc.save()
                    for external_id in set(
                            tuple(r["external_id"].items()) for r in records
                            if r["external_id"]["type"] and r["external_id"]["value"]):
                        ei = WorkExternalId(work_record=wr, **dict(external_id))
                        ei.save()
                    for invitee in set(
                            tuple(r["invitee"].items()) for r in records
                            if r["invitee"]["email"] and not r["excluded"]):
                        rec = WorkInvitee(work_record=wr, **dict(invitee))
                        validator = ModelValidator(rec)
                        if not validator.validate():
                            raise ModelException(f"Invalid invitee record: {validator.errors}")
                        rec.save()
                return task
            except Exception:
                db.rollback()
                app.logger.exception("Failed to load work file.")
                raise

    @classmethod
    def load_from_json(cls, source, filename=None, org=None, task=None, **kwargs):
        """Load work records from a JSON/YAML file or a string.

        :return: the created (or supplied) :class:`Task`.
        """
        if isinstance(source, str):
            # import data from file based on its extension; either it is YAML or JSON
            work_data_list = load_yaml_json(filename=filename, source=source, content_type="json")
            # FIX: guard the .get — the parsed payload may be a plain list of records:
            if not filename and isinstance(work_data_list, dict):
                filename = work_data_list.get("filename")
            if isinstance(work_data_list, dict):
                work_data_list = work_data_list.get("records")
            # TODO: validation of uploaded work file
            for work_data in work_data_list:
                validation_source_data = copy.deepcopy(work_data)
                validation_source_data = del_none(validation_source_data)
                # Adding schema validation for Work
                validator = Core(
                    source_data=validation_source_data,
                    schema_files=[os.path.join(SCHEMA_DIR, "work_schema.yaml")])
                validator.validate(raise_exception=True)
        # NOTE(review): a non-str source falls through with work_data_list unbound
        # and raises NameError below — confirm only string sources are supported.
        try:
            if org is None:
                org = current_user.organisation if current_user else None
            if not task:
                task = Task.create(org=org, filename=filename, task_type=TaskType.WORK)
            for work_data in work_data_list:
                title = get_val(work_data, "title", "title", "value")
                sub_title = get_val(work_data, "title", "subtitle", "value")
                translated_title = get_val(work_data, "title", "translated-title", "value")
                translated_title_language_code = get_val(work_data, "title", "translated-title", "language-code")
                journal_title = get_val(work_data, "journal-title", "value")
                short_description = get_val(work_data, "short-description")
                citation_type = get_val(work_data, "citation", "citation-type")
                citation_value = get_val(work_data, "citation", "citation-value")
                type = get_val(work_data, "type")
                publication_media_type = get_val(work_data, "publication-date", "media-type")
                url = get_val(work_data, "url", "value")
                language_code = get_val(work_data, "language-code")
                country = get_val(work_data, "country", "value")
                # Removing key 'media-type' from the publication_date dict. and only considering year, day & month
                publication_date = PartialDate.create(
                    {date_key: work_data.get("publication-date")[date_key] for date_key in
                     ('day', 'month', 'year')}) if work_data.get("publication-date") else None
                work_record = WorkRecord.create(
                    task=task,
                    title=title,
                    sub_title=sub_title,
                    translated_title=translated_title,
                    translated_title_language_code=translated_title_language_code,
                    journal_title=journal_title,
                    short_description=short_description,
                    citation_type=citation_type,
                    citation_value=citation_value,
                    type=type,
                    publication_date=publication_date,
                    publication_media_type=publication_media_type,
                    url=url,
                    language_code=language_code,
                    country=country)
                invitee_list = work_data.get("invitees")
                if invitee_list:
                    for invitee in invitee_list:
                        identifier = invitee.get("identifier")
                        email = invitee.get("email")
                        first_name = invitee.get("first-name")
                        last_name = invitee.get("last-name")
                        orcid_id = invitee.get("ORCID-iD")
                        put_code = invitee.get("put-code")
                        visibility = get_val(invitee, "visibility")
                        WorkInvitee.create(
                            work_record=work_record,
                            identifier=identifier,
                            # FIX: guard against a missing email — .lower() on None
                            # raised AttributeError:
                            email=email.lower() if email else None,
                            first_name=first_name,
                            last_name=last_name,
                            orcid=orcid_id,
                            visibility=visibility,
                            put_code=put_code)
                else:
                    raise SchemaError(u"Schema validation failed:\n - "
                                      u"Expecting Invitees for which the work record will be written")
                # FIX: .get("contributors", "contributor") returned the literal string
                # "contributor" when the key was absent and the whole wrapper dict when
                # present; drill into {"contributors": {"contributor": [...]}} instead.
                contributor_list = get_val(work_data, "contributors", "contributor")
                if contributor_list:
                    for contributor in contributor_list:
                        orcid_id = get_val(contributor, "contributor-orcid", "path")
                        name = get_val(contributor, "credit-name", "value")
                        email = get_val(contributor, "contributor-email", "value")
                        role = get_val(contributor, "contributor-attributes", "contributor-role")
                        contributor_sequence = get_val(contributor, "contributor-attributes",
                                                       "contributor-sequence")
                        WorkContributor.create(
                            work_record=work_record,
                            orcid=orcid_id,
                            name=name,
                            email=email,
                            role=role,
                            contributor_sequence=contributor_sequence)
                external_ids_list = work_data.get("external-ids").get("external-id") if \
                    work_data.get("external-ids") else None
                if external_ids_list:
                    for external_id in external_ids_list:
                        type = external_id.get("external-id-type")
                        value = external_id.get("external-id-value")
                        url = get_val(external_id, "external-id-url", "value")
                        relationship = external_id.get("external-id-relationship")
                        WorkExternalId.create(
                            work_record=work_record,
                            type=type,
                            value=value,
                            url=url,
                            relationship=relationship)
                else:
                    raise SchemaError(u"Schema validation failed:\n - An external identifier is required")
            return task
        except Exception:
            db.rollback()
            app.logger.exception("Failed to load work record file.")
            raise

    def to_export_dict(self):
        """Map the work record to dict for export into JSON/YAML."""
        d = super().to_export_dict()
        if self.journal_title:
            d["journal-title"] = dict(value=self.journal_title)
        d["short-description"] = self.short_description
        if self.publication_date:
            pd = self.publication_date.as_orcid_dict()
            if self.publication_media_type:
                pd["media-type"] = self.publication_media_type
            d["publication-date"] = pd
        if self.url:
            d["url"] = self.url
        if self.citation_type or self.citation_value:
            d["citation"] = {
                "citation-type": self.citation_type,
                "citation-value": self.citation_value
            }
        if self.country:
            d["country"] = dict(value=self.country)
        return d

    class Meta:  # noqa: D101,D106
        db_table = "work_record"
        table_alias = "wr"
class ContributorModel(BaseModel):
    """Common model bits of the contributor records."""

    orcid = OrcidIdField(null=True)
    name = CharField(max_length=120, null=True)
    role = CharField(max_length=120, null=True)
    email = CharField(max_length=120, null=True)

    def to_export_dict(self):
        """Map the contributor record to dict for export into JSON/YAML."""
        exported = {}
        exported["contributor-attributes"] = {"contributor-role": self.role}
        exported["contributor-email"] = dict(value=self.email)
        exported["credit-name"] = dict(value=self.name)
        exported["contributor-orcid"] = dict(path=self.orcid)
        return exported
class WorkContributor(ContributorModel):
    """Researcher or contributor - related to work."""

    work_record = ForeignKeyField(
        WorkRecord, related_name="contributors", on_delete="CASCADE")
    contributor_sequence = CharField(max_length=120, null=True)

    class Meta:  # noqa: D101,D106
        db_table = "work_contributor"
        table_alias = "wc"

    def to_export_dict(self):
        """Map the contributor record to dict for export into JSON/YAML."""
        exported = super().to_export_dict()
        # Works additionally carry the contributor's position in the author list:
        exported["contributor-attributes"]["contributor-sequence"] = self.contributor_sequence
        return exported
class FundingContributor(ContributorModel):
    """Researcher or contributor - receiver of the funding."""

    # Owning funding record; row is removed when the record is deleted.
    funding_record = ForeignKeyField(
        FundingRecord, related_name="contributors", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "funding_contributor"
        table_alias = "fc"
class InviteeModel(BaseModel):
    """Common model bits of the invitees records."""

    identifier = CharField(max_length=120, null=True)
    email = CharField(max_length=120, null=True)
    first_name = CharField(max_length=120, null=True)
    last_name = CharField(max_length=120, null=True)
    orcid = OrcidIdField(null=True)
    put_code = IntegerField(null=True)
    visibility = CharField(null=True, max_length=100)
    status = TextField(null=True, help_text="Record processing status.")
    processed_at = DateTimeField(null=True)

    def save(self, *args, **kwargs):
        """Normalise the email address to lower case before saving."""
        if self.is_dirty() and self.email and self.field_is_updated("email"):
            self.email = self.email.lower()
        return super().save(*args, **kwargs)

    def add_status_line(self, line):
        """Add a text line to the status for logging processing progress."""
        timestamp = datetime.utcnow().isoformat(timespec="seconds")
        prefix = self.status + "\n" if self.status else ''
        self.status = prefix + timestamp + ": " + line

    def to_export_dict(self):
        """Get row representation suitable for export to JSON/YAML."""
        model = self.__class__
        exported = self.to_dict(
            to_dashes=True,
            exclude_nulls=True,
            only=[model.identifier, model.email, model.first_name, model.last_name,
                  model.put_code, model.visibility],
            recurse=False)
        # The ORCID iD uses a non-derivable key, so it is added by hand:
        if self.orcid:
            exported["ORCID-iD"] = self.orcid
        return exported
class PeerReviewInvitee(InviteeModel):
    """Researcher or Invitee - related to peer review."""

    # Owning peer-review record; row is removed when the record is deleted.
    peer_review_record = ForeignKeyField(
        PeerReviewRecord, related_name="peer_review_invitee", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "peer_review_invitee"
        table_alias = "pi"
class WorkInvitee(InviteeModel):
    """Researcher or Invitee - related to work."""

    # Owning work record; row is removed when the record is deleted.
    work_record = ForeignKeyField(
        WorkRecord, related_name="invitees", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "work_invitees"
        table_alias = "wi"
class FundingInvitee(InviteeModel):
    """Researcher or Invitee - related to funding."""

    # Owning funding record; row is removed when the record is deleted.
    funding_record = ForeignKeyField(
        FundingRecord, related_name="invitees", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "funding_invitees"
        table_alias = "fi"
class ExternalIdModel(BaseModel):
    """Common model bits of the ExternalId records."""

    # Form choices: (raw value, human-readable label) pairs derived from the values.
    relationship_choices = [(v, v.replace('_', ' ').title()) for v in ['', "PART_OF", "SELF"]]
    type_choices = [(v, v.replace("_", " ").replace("-", " ").title()) for v in [
        '', "agr", "ark", "arxiv", "asin", "asin-tld", "authenticusid", "bibcode", "cba",
        "cienciaiul", "cit", "ctx", "dnb", "doi", "eid", "ethos", "grant_number", "handle", "hir",
        "isbn", "issn", "jfm", "jstor", "kuid", "lccn", "lensid", "mr", "oclc", "ol", "osti",
        "other-id", "pat", "pdb", "pmc", "pmid", "rfc", "rrid", "source-work-id", "ssrn", "uri",
        "urn", "wosuid", "zbl"
    ]]

    type = CharField(max_length=255, choices=type_choices)
    value = CharField(max_length=255)
    url = CharField(max_length=200, null=True)
    relationship = CharField(max_length=255, choices=relationship_choices)

    def to_export_dict(self):
        """Map the external ID record to dict for export into JSON/YAML."""
        exported = {
            "external-id-type": self.type,
            "external-id-value": self.value,
            "external-id-relationship": self.relationship,
        }
        # The URL is wrapped in an ORCID-style value object and omitted when absent:
        if self.url:
            exported["external-id-url"] = {"value": self.url}
        return exported
class WorkExternalId(ExternalIdModel):
    """Work ExternalId loaded for batch processing."""

    # Owning work record; row is removed when the record is deleted.
    work_record = ForeignKeyField(
        WorkRecord, related_name="external_ids", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "work_external_id"
        table_alias = "wei"
class PeerReviewExternalId(ExternalIdModel):
    """Peer Review ExternalId loaded for batch processing."""

    # Owning peer-review record; row is removed when the record is deleted.
    peer_review_record = ForeignKeyField(
        PeerReviewRecord, related_name="external_ids", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "peer_review_external_id"
        table_alias = "pei"
class ExternalId(ExternalIdModel):
    """Funding ExternalId loaded for batch processing."""

    # Owning funding record; row is removed when the record is deleted.
    funding_record = ForeignKeyField(
        FundingRecord, related_name="external_ids", on_delete="CASCADE")

    class Meta:  # noqa: D101,D106
        db_table = "external_id"
        table_alias = "ei"
class Delegate(BaseModel):
    """External applications that can be redirected to."""

    # Host name of the external application — presumably used to whitelist
    # redirect targets; confirm against the view code.
    hostname = CharField()
class Url(BaseModel, AuditMixin):
    """Shortened URLs."""

    short_id = CharField(unique=True, max_length=5)
    url = TextField()

    @classmethod
    def shorten(cls, url):
        """Create a shortened URL or retrieve the existing one for *url*."""
        try:
            return cls.get(url=url)
        except cls.DoesNotExist:
            alphabet = string.ascii_letters + string.digits
            # Draw random 5-character IDs until an unused one is found:
            while True:
                candidate = ''.join(random.choice(alphabet) for _ in range(5))
                if not cls.select().where(cls.short_id == candidate).exists():
                    break
            return cls.create(short_id=candidate, url=url)
class Funding(BaseModel):
    """Uploaded research Funding record."""

    # NOTE(review): these fields mirror Url (short_id/url) exactly — looks
    # copy-pasted; confirm this model is actually used anywhere.
    short_id = CharField(unique=True, max_length=5)
    url = TextField()
class Client(BaseModel, AuditMixin):
    """API Client Application/Consumer.

    A client is the app which wants to use the resource of a user.
    It is suggested that the client is registered by a user on your site,
    but it is not required.
    """

    name = CharField(null=True, max_length=40, help_text="human readable name, not required")
    homepage_url = CharField(null=True, max_length=100)
    description = CharField(
        null=True, max_length=400, help_text="human readable description, not required")
    user = ForeignKeyField(
        User, null=True, on_delete="SET NULL", help_text="creator of the client, not required")
    org = ForeignKeyField(Organisation, on_delete="CASCADE", related_name="client_applications")
    client_id = CharField(max_length=100, unique=True)
    client_secret = CharField(max_length=55, unique=True)
    is_confidential = BooleanField(null=True, help_text="public or confidential")
    grant_type = CharField(max_length=18, default="client_credentials", null=True)
    response_type = CharField(max_length=4, default="code", null=True)
    _redirect_uris = TextField(null=True)
    _default_scopes = TextField(null=True)

    def save(self, *args, **kwargs):  # noqa: D102
        # Default the creator to the currently logged-in user on first save:
        if self.is_dirty() and self.user_id is None and current_user:
            self.user_id = current_user.id
        return super().save(*args, **kwargs)

    @property
    def client_type(self):  # noqa: D102
        return 'confidential' if self.is_confidential else 'public'

    @property
    def redirect_uris(self):  # noqa: D102
        # Stored as a single whitespace-separated string:
        return self._redirect_uris.split() if self._redirect_uris else []

    @redirect_uris.setter
    def redirect_uris(self, value):
        if isinstance(value, str) and value:
            self._redirect_uris = value

    @property
    def callback_urls(self):  # noqa: D102
        return self._redirect_uris

    @callback_urls.setter
    def callback_urls(self, value):
        self._redirect_uris = value

    @property
    def default_redirect_uri(self):  # noqa: D102
        uris = self.redirect_uris
        return uris[0] if uris else None

    @property
    def default_scopes(self):  # noqa: D102
        return self._default_scopes.split() if self._default_scopes else []

    def validate_scopes(self, scopes):
        """Validate client requested scopes."""
        return not scopes or "/webhook" in scopes

    def __repr__(self):  # noqa: D102
        return self.name or self.homepage_url or self.description
class Grant(BaseModel):
    """Grant Token / Authorization Code.

    A grant token is created in the authorization flow, and will be destroyed when
    the authorization is finished. In this case, it would be better to store the data
    in a cache, which leads to better performance.
    """

    user = ForeignKeyField(User, on_delete="CASCADE")
    client = ForeignKeyField(Client, index=True, on_delete="CASCADE")
    code = CharField(max_length=255, index=True)
    redirect_uri = CharField(max_length=255, null=True)
    expires = DateTimeField(null=True)
    _scopes = TextField(null=True)

    @property
    def scopes(self):  # noqa: D102
        # Stored as a single whitespace-separated string:
        return self._scopes.split() if self._scopes else []

    @scopes.setter
    def scopes(self, value):  # noqa: D102
        self._scopes = value if isinstance(value, str) else ' '.join(value)
class Token(BaseModel):
    """Bearer Token.

    A bearer token is the final token that could be used by the client.
    There are other token types, but bearer token is widely used.
    Flask-OAuthlib only comes with a bearer token.
    """

    client = ForeignKeyField(Client, on_delete="CASCADE")
    user = ForeignKeyField(User, null=True, on_delete="SET NULL")
    token_type = CharField(max_length=40)
    access_token = CharField(max_length=100, unique=True)
    refresh_token = CharField(max_length=100, unique=True, null=True)
    expires = DateTimeField(null=True)
    _scopes = TextField(null=True)

    @property
    def scopes(self):  # noqa: D102
        # Stored as a single whitespace-separated string:
        return self._scopes.split() if self._scopes else []

    @property
    def expires_at(self):  # noqa: D102
        # Alias expected by the OAuth provider interface:
        return self.expires
def readup_file(input_file):
    """Read the whole binary content of *input_file* and decode it to text.

    A few likely encodings are attempted in order; latin-1, which can decode
    any byte sequence, serves as the last-resort fallback.
    """
    raw = input_file.read()
    for codec in ("utf-8-sig", "utf-8", "utf-16"):
        try:
            return raw.decode(codec)
        except UnicodeDecodeError:
            pass
    return raw.decode("latin-1")
def create_tables():
    """Create all DB tables."""
    try:
        db.connect()
    except OperationalError:
        # Connection may already be open; ignore and continue.
        pass
    # NOTE: order matters — tables referenced by foreign keys must be
    # created before the tables that point at them.
    for model in [
        File,
        Organisation,
        User,
        UserOrg,
        OrcidToken,
        UserOrgAffiliation,
        OrgInfo,
        OrcidApiCall,
        OrcidAuthorizeCall,
        Task,
        Log,
        AffiliationRecord,
        GroupIdRecord,
        OrgInvitation,
        Url,
        UserInvitation,
        FundingRecord,
        WorkRecord,
        WorkContributor,
        WorkExternalId,
        WorkInvitee,
        FundingContributor,
        FundingInvitee,
        ExternalId,
        PeerReviewRecord,
        PeerReviewInvitee,
        PeerReviewExternalId,
        ResearcherUrlRecord,
        OtherNameRecord,
        Client,
        Grant,
        Token,
        Delegate,
    ]:
        if not model.table_exists():
            model.create_table()
def create_audit_tables():
    """Create all DB audit tables for PostgreSQL DB."""
    try:
        db.connect()
    except OperationalError:
        # Connection may already be open; ignore and continue.
        pass
    # Auditing triggers are PostgreSQL specific; other backends are skipped.
    if isinstance(db, PostgresqlDatabase):
        # 'br' = binary read; decoding is delegated to readup_file.
        with open(os.path.join(os.path.dirname(__file__), "sql", "auditing.sql"), 'br') as input_file:
            sql = readup_file(input_file)
        db.commit()
        with db.get_cursor() as cr:
            cr.execute(sql)
        db.commit()
def drop_tables():
    """Drop all model tables."""
    # Ordered so that dependent (FK-holding) tables are dropped first.
    for m in (File, User, UserOrg, OtherNameRecord, OrcidToken, UserOrgAffiliation, OrgInfo, OrgInvitation,
              OrcidApiCall, OrcidAuthorizeCall, FundingContributor, FundingInvitee, FundingRecord,
              PeerReviewInvitee, PeerReviewExternalId, PeerReviewRecord, ResearcherUrlRecord,
              WorkInvitee, WorkExternalId, WorkContributor, WorkRecord, AffiliationRecord, ExternalId, Url,
              UserInvitation, Task, Organisation):
        if m.table_exists():
            try:
                # cascade only where the backend supports it (see drop_cascade flag)
                m.drop_table(fail_silently=True, cascade=m._meta.database.drop_cascade)
            except OperationalError:
                pass
def load_yaml_json(filename, source, content_type=None):
    """Create a common way of loading JSON or YAML file.

    :param filename: original file name; its extension is used to guess the format.
    :param source: the raw document text.
    :param content_type: explicit "json" or "yaml"; guessed when not given.
    :return: parsed data (a list of records or a dict with a "records" key).
    :raises SchemaError: when the parsed document is not a list of records.
    """
    if not content_type:
        _, ext = os.path.splitext(filename or '')
        if not ext:
            source = source.strip()
        # JSON documents start with '[' or '{'; anything else is treated as YAML.
        content_type = "json" if ((not ext and (source.startswith('[') or source.startswith('{')))
                                  or ext == ".json") else "yaml"
    if content_type == "yaml":
        # safe_load: yaml.load without an explicit Loader can execute arbitrary
        # constructors and is deprecated since PyYAML 5.1; plain data documents
        # parse identically with safe_load.
        data = json.loads(json.dumps(yaml.safe_load(source)), object_pairs_hook=NestedDict)
    else:
        data = json.loads(source, object_pairs_hook=NestedDict)
    # Removing None for correct schema validation
    if not isinstance(data, list) and not (isinstance(data, dict) and "records" in data):
        raise SchemaError(
            u"Schema validation failed:\n - Expecting a list of Records")
    return data
def del_none(d):
    """Recursively remove keys mapped to ``None`` from dictionary *d* in place.

    Dictionaries nested directly or inside lists are cleaned as well, so that
    schema validation does not fail on elements that are none.
    """
    for key in [k for k, v in d.items() if v is None]:
        del d[key]
    for value in d.values():
        if isinstance(value, dict):
            del_none(value)
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, dict):
                    del_none(element)
    return d
def get_val(d, *keys, default=None):
    """To get the value from uploaded fields."""
    # NestedDict supports multi-key lookup natively.
    if isinstance(d, NestedDict):
        return d.get(*keys, default=default)
    current = d
    for key in keys:
        # Stop descending as soon as there is nothing left to look into.
        if not current:
            break
        current = current.get(key, default)
    return current
/DuHast-1.0.7-py3-none-any.whl/duHast/APISamples/RevitCeilings.py | '''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Revit ceilings helper functions.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#
#License:
#
#
# Revit Batch Processor Sample Code
#
# Copyright (c) 2021 Jan Christel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import clr
import System
# import common library modules
import RevitCommonAPI as com
import RevitFamilyUtils as rFam
import RevitGeometry as rGeo
import RevitDesignSetOptions as rDesignO
import DataCeiling as dCeiling
import RevitPhases as rPhase
# import Autodesk
import Autodesk.Revit.DB as rdb
clr.ImportExtensions(System.Linq)
# -------------------------------------------- common variables --------------------
#: header row used in ceiling reports
REPORT_CEILINGS_HEADER = ['HOSTFILE', 'CEILINGTYPEID', 'CEILINGTYPENAME']
#: Built in family name for compound ceilings
COMPOUND_CEILING_FAMILY_NAME = 'Compound Ceiling'
#: Built in family name for basic ceilings
BASIC_CEILING_FAMILY_NAME = 'Basic Ceiling'
#: Built in family name for roof soffits
ROOF_SOFFIT_FAMILY_NAME = 'Roof Soffit'
#: List of all Built in ceiling family names (the Revit system families)
BUILTIN_CEILING_TYPE_FAMILY_NAMES = [
    COMPOUND_CEILING_FAMILY_NAME,
    BASIC_CEILING_FAMILY_NAME,
    ROOF_SOFFIT_FAMILY_NAME
]
# --------------------------------------------- utility functions ------------------
def GetAllCeilingTypesByCategory(doc):
    '''
    Returns a filtered element collector with every ceiling type in the model,
    filtered by the Ceilings category:

    - Compound Ceiling
    - In place families or loaded families
    - Basic Ceiling

    Roof soffit types do not belong to this category and are therefore excluded.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: A filtered element collector containing ceiling types.
    :rtype: Autodesk.Revit.DB.FilteredElementCollector
    '''
    return rdb.FilteredElementCollector(doc).OfCategory(
        rdb.BuiltInCategory.OST_Ceilings).WhereElementIsElementType()
def GetCeilingTypesByClass(doc):
    '''
    Returns a filtered element collector with all system ceiling types,
    filtered by class:

    - Roof Soffit
    - Compound Ceiling
    - Basic Ceiling

    In place family types are not of class CeilingType and are therefore excluded.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: A filtered element collector containing ceiling types.
    :rtype: Autodesk.Revit.DB.FilteredElementCollector
    '''
    collector = rdb.FilteredElementCollector(doc)
    return collector.OfClass(rdb.CeilingType)
def BuildCeilingTypeDictionary(collector, dic):
    '''
    Returns the dictionary past in with keys and or values added retrieved from collector past in.

    Keys are built in ceiling family type names.
    TODO: Use more generic code.

    :param collector: A filtered element collector containing ceiling types.
    :type collector: Autodesk.Revit.DB.FilteredElementCollector
    :param dic: A dictionary containing key: ceiling type family name, value: list of ids belonging to that type.
    :type dic: dictionary (key str, value list of Autodesk.Revit.DB.ElementId)

    :return: A dictionary containing key: built in ceiling type family name, value: list of ids belonging to that type.
    :rtype: dictionary (key str, value list of Autodesk.Revit.DB.ElementId)
    '''
    for c in collector:
        # ``dict.has_key()`` is Python-2 only; the ``in`` operator behaves the
        # same under IronPython 2.7 and also works on Python 3.
        if c.FamilyName in dic:
            # avoid duplicate ids per family name
            if c.Id not in dic[c.FamilyName]:
                dic[c.FamilyName].append(c.Id)
        else:
            dic[c.FamilyName] = [c.Id]
    return dic
def SortCeilingTypesByFamilyName(doc):
    '''
    Returns a dictionary of all ceiling types in the model where key is the
    built in ceiling family name, values are ids of associated ceiling types.

    TODO: Use more generic code.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: A dictionary containing key: built in ceiling type family name, value: list of ids belonging to that type.
    :rtype: dictionary (key str, value list of Autodesk.Revit.DB.ElementId)
    '''
    usedWts = {}
    # combine class based types (system families) with category based types
    # (which also include in place ceiling families)
    usedWts = BuildCeilingTypeDictionary(GetCeilingTypesByClass(doc), usedWts)
    usedWts = BuildCeilingTypeDictionary(GetAllCeilingTypesByCategory(doc), usedWts)
    return usedWts
# -------------------------------- none in place ceiling types -------------------------------------------------------
def GetAllCeilingInstancesInModelByCategory(doc):
    '''
    Returns all ceiling elements placed in the model, filtered by category.
    Roof soffits are ignored.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: A filtered element collector containing ceiling instances.
    :rtype: Autodesk.Revit.DB.FilteredElementCollector
    '''
    return rdb.FilteredElementCollector(doc).OfCategory(
        rdb.BuiltInCategory.OST_Ceilings).WhereElementIsNotElementType()
def GetAllCeilingInstancesInModelByClass(doc):
    '''
    Returns all ceiling elements placed in the model, filtered by class.
    In place families are ignored.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: A filtered element collector containing ceiling instances.
    :rtype: Autodesk.Revit.DB.FilteredElementCollector
    '''
    collector = rdb.FilteredElementCollector(doc).OfClass(rdb.Ceiling)
    return collector.WhereElementIsNotElementType()
def GetAllCeilingTypeIdsInModelByCategory(doc):
    '''
    Gets all ceiling element type ids available in model.

    Filters by category.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: List of ceiling type ids.
    :rtype: list of Autodesk.Revit.DB.ElementId
    '''
    # removed the redundant ``ids = []`` pre-initialisation that was
    # immediately overwritten; also corrected the documented return type
    # (a list of ids, not a collector).
    colCat = GetAllCeilingTypesByCategory(doc)
    return com.GetIdsFromElementCollector(colCat)
def GetAllCeilingTypeIdsInModelByClass(doc):
    '''
    Gets all ceiling element type ids available in model.

    Filters by class.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: List of ceiling type ids.
    :rtype: list of Autodesk.Revit.DB.ElementId
    '''
    # removed the redundant ``ids = []`` pre-initialisation that was
    # immediately overwritten; also corrected the documented return type
    # (a list of ids, not a collector).
    colClass = GetCeilingTypesByClass(doc)
    return com.GetIdsFromElementCollector(colClass)
def GetUsedCeilingTypeIds(doc):
    '''
    Returns all used ceiling type ids (filtered by category).

    Used: at least one instance of this type is placed in the model.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: List of element ids representing used ceiling types.
    :rtype: list of Autodesk.Revit.DB.ElementId
    '''
    # flag value 1 selects "used" types
    return com.GetUsedUnusedTypeIds(doc, GetAllCeilingTypeIdsInModelByCategory, 1)
def FamilyNoTypesInUse(famTypeIds, unUsedTypeIds):
    '''
    Checks whether every id in famTypeIds is contained in unUsedTypeIds,
    i.e. whether no type of the family is in use.

    Returns False as soon as one id of famTypeIds is missing from
    unUsedTypeIds (at least one type is in use). Note: the previous
    docstring stated the inverse of what the code actually does.
    TODO: check for more generic list comparison and remove this function.

    :param famTypeIds: List of family type ids to check.
    :type famTypeIds: List of Autodesk.Revit.DB.ElementId
    :param unUsedTypeIds: Reference list of ids.
    :type unUsedTypeIds: List of Autodesk.Revit.DB.ElementId

    :return: True if all ids from famTypeIds are in unUsedTypeIds, otherwise False.
    :rtype: bool
    '''
    return all(famTypeId in unUsedTypeIds for famTypeId in famTypeIds)
def GetUnusedNonInPlaceCeilingTypeIdsToPurge(doc):
    '''
    Returns all unused system ceiling type ids:

    - Roof Soffit
    - Compound Ceiling
    - Basic Ceiling

    This method can be used to safely delete unused ceiling types: should no
    placed instance use any type of a system family, all but one type id of
    that family are returned, since Revit requires at least one ceiling type
    definition to be in the model.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: List of element ids representing not used ceiling types.
    :rtype: list of Autodesk.Revit.DB.ElementId
    '''
    # flag value 0 selects "unused" types
    ids = com.GetUsedUnusedTypeIds(doc, GetAllCeilingTypeIdsInModelByClass, 0)
    # make sure at least one ceiling type per system family is left in the model
    ceilingTypes = SortCeilingTypesByFamilyName(doc)
    for familyName, typeIds in ceilingTypes.items():
        if familyName in BUILTIN_CEILING_TYPE_FAMILY_NAMES:
            if FamilyNoTypesInUse(typeIds, ids):
                # every type of this system family is unused: keep the first one
                ids.remove(typeIds[0])
    return ids
# -------------------------------- In place ceiling types -------------------------------------------------------
def GetInPlaceCeilingFamilyInstances(doc):
    '''
    Gets all instances of in place families of category ceiling.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: A filtered element collector containing in place ceiling instances.
    :rtype: Autodesk.Revit.DB.FilteredElementCollector
    '''
    # renamed local from ``filter``, which shadowed the built-in of the same name
    catFilter = rdb.ElementCategoryFilter(rdb.BuiltInCategory.OST_Ceilings)
    return rdb.FilteredElementCollector(doc).OfClass(rdb.FamilyInstance).WherePasses(catFilter)
def GetAllInPlaceCeilingTypeIdsInModel(doc):
    '''
    Returns the type ids of all available in place families of category ceiling.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: List of element ids representing in place ceiling types.
    :rtype: list of Autodesk.Revit.DB.ElementId
    '''
    return rFam.GetAllInPlaceTypeIdsInModelOfCategory(doc, rdb.BuiltInCategory.OST_Ceilings)
def GetUsedInPlaceCeilingTypeIds(doc):
    '''
    Returns all used in place ceiling type ids in the model.

    Used: at least one instance of this type is placed in the model.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: List of element ids representing used in place ceiling types.
    :rtype: list of Autodesk.Revit.DB.ElementId
    '''
    # flag value 1 selects "used" types
    return com.GetUsedUnusedTypeIds(doc, GetAllInPlaceCeilingTypeIdsInModel, 1)
def GetUnusedInPlaceCeilingTypeIds(doc):
    '''
    Returns all unused in place ceiling type ids in the model.

    Unused: not one instance of this type is placed in the model.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: List of element ids representing unused in place ceiling types.
    :rtype: list of Autodesk.Revit.DB.ElementId
    '''
    # flag value 0 selects "unused" types
    return com.GetUsedUnusedTypeIds(doc, GetAllInPlaceCeilingTypeIdsInModel, 0)
def GetUnusedInPlaceCeilingIdsForPurge(doc):
    '''
    Returns symbol (type) ids and family ids (when no type is in use) of in
    place ceiling families which can be safely deleted from the model.

    Unlike system ceiling types, Revit does not require at least one in place
    ceiling definition to remain in the model.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: List of element ids representing unused in place ceiling types and families.
    :rtype: list of Autodesk.Revit.DB.ElementId
    '''
    return rFam.GetUnusedInPlaceIdsForPurge(doc, GetUnusedInPlaceCeilingTypeIds)
# -------------------------------- ceiling geometry -------------------------------------------------------
def Get2DPointsFromRevitCeiling(ceiling):
    '''
    Returns a list of lists of points representing the flattened (2D) geometry
    of the given ceiling.

    A ceiling can be made up of multiple sketches; each nested list represents
    one ceiling sketch. Does not work with in place ceilings.

    :param ceiling: A revit ceiling instance.
    :type ceiling: Autodesk.Revit.DB.Ceiling

    :return: A list of data geometry instances.
    :rtype: list of :class:`.DataGeometry`
    '''
    allCeilingPoints = []
    geometryElements = ceiling.get_Geometry(rdb.Options())
    # keep Solid geometry only (exact type match, as before)
    # TODO: check for FamilyInstance geometry (in place families!)
    solids = [g for g in geometryElements if type(g) is rdb.Solid]
    # in place families may have more than one solid
    for solid in solids:
        # each sketch contributes one list of flattened points
        allCeilingPoints.extend(rGeo.ConvertSolidToFlattened2DPoints(solid))
    return allCeilingPoints
def Get2DPointsFromRevitCeilingsInModel(doc):
    '''
    Returns a list of lists of points representing the flattened (2D) geometry
    of every ceiling in the model.

    Each nested list represents one ceiling (which itself may consist of
    multiple sketches). Does not work with in place ceilings.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: A list of data geometry instances.
    :rtype: list of :class:`.DataGeometry`
    '''
    allCeilingPoints = []
    for instance in GetAllCeilingInstancesInModelByCategory(doc):
        points = Get2DPointsFromRevitCeiling(instance)
        # skip ceilings yielding no solid geometry
        if points:
            allCeilingPoints.append(points)
    return allCeilingPoints
# -------------------------------- ceiling data -------------------------------------------------------
def GetAllCeilingData(doc):
    '''
    Returns a list of ceiling data objects, one per ceiling element in the model.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document

    :return: A list of data ceiling instances.
    :rtype: list of :class:`.DataCeiling`
    '''
    allCeilingData = []
    for ceiling in GetAllCeilingInstancesInModelByCategory(doc):
        dataEntry = PopulateDataCeilingObject(doc, ceiling)
        # ceilings without usable geometry yield None and are skipped
        if dataEntry is not None:
            allCeilingData.append(dataEntry)
    return allCeilingData
def PopulateDataCeilingObject(doc, revitCeiling):
    '''
    Returns a custom ceiling data objects populated with some data from the revit model ceiling past in.

    - ceiling id
    - ceiling type name
    - ceiling mark
    - ceiling type mark
    - ceiling level name
    - ceiling level id
    - ceiling offset from level

    Returns None when the ceiling has no usable (solid) geometry.

    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document
    :param revitCeiling: A revit ceiling instance.
    :type revitCeiling: Autodesk.Revit.DB.Ceiling

    :return: A data ceiling object instance, or None.
    :rtype: :class:`.DataCeiling` or None
    '''
    # set up data class object
    dataC = dCeiling.DataCeiling()
    # get ceiling geometry (boundary points)
    revitGeometryPointGroups = Get2DPointsFromRevitCeiling(revitCeiling)
    if(len(revitGeometryPointGroups) > 0):
        # convert the Revit XYZ point groups into plain double based geometry
        ceilingPointGroupsAsDoubles = []
        for allCeilingPointGroups in revitGeometryPointGroups:
            dataGeoConverted = rGeo.ConvertXYZInDataGeometry(doc, allCeilingPointGroups)
            ceilingPointGroupsAsDoubles.append(dataGeoConverted)
        dataC.geometry = ceilingPointGroupsAsDoubles
        # get other data
        dataC.designSetAndOption = rDesignO.GetDesignSetOptionInfo(doc, revitCeiling)
        ceilingTypeId = revitCeiling.GetTypeId()
        ceilingType = doc.GetElement(ceilingTypeId)
        dataC.id = revitCeiling.Id.IntegerValue
        # .encode('utf-8') — this module targets IronPython 2.x strings
        dataC.typeName = rdb.Element.Name.GetValue(revitCeiling).encode('utf-8')
        dataC.mark = com.GetBuiltInParameterValue(revitCeiling, rdb.BuiltInParameter.ALL_MODEL_MARK)  # need to get the mark here...
        dataC.typeMark = com.GetBuiltInParameterValue(ceilingType, rdb.BuiltInParameter.ALL_MODEL_TYPE_MARK)
        dataC.levelName = rdb.Element.Name.GetValue(doc.GetElement(revitCeiling.LevelId)).encode('utf-8')
        dataC.levelId = revitCeiling.LevelId.IntegerValue
        dataC.offsetFromLevel = com.GetBuiltInParameterValue(revitCeiling, rdb.BuiltInParameter.CEILING_HEIGHTABOVELEVEL_PARAM)  # offset from level
        # get the model name
        if(doc.IsDetached):
            dataC.modelName = 'Detached Model'
        else:
            dataC.modelName = doc.Title
        # get phasing information
        dataC.phaseCreated = rPhase.GetPhaseNameById(doc, com.GetBuiltInParameterValue(revitCeiling, rdb.BuiltInParameter.PHASE_CREATED, com.GetParameterValueAsElementId)).encode('utf-8')
        dataC.phaseDemolished = rPhase.GetPhaseNameById(doc, com.GetBuiltInParameterValue(revitCeiling, rdb.BuiltInParameter.PHASE_DEMOLISHED, com.GetParameterValueAsElementId)).encode('utf-8')
        return dataC
    else:
        # no solid geometry found for this ceiling
        return None
/Nikippe-0.1.11.tar.gz/Nikippe-0.1.11/nikippe/renderer/sequentialchart.py | from nikippe.renderer.achart import AChart
class SequentialChart(AChart):
    """
    Regular chart with the latest values added at the right and the oldest values on the left. If full, the chart is
    shifted to left; the oldest entry removed.

    no additional yaml entries necessary.
    """

    def __init__(self, config, update_available, mqtt_client, logger):
        """
        Constructor

        :param config: config yaml structure
        :param update_available: Event instance. provided by renderer
        :param mqtt_client: mymqttclient instance
        :param logger: logger instance
        """
        AChart.__init__(self, config, update_available, mqtt_client, logger, self.__class__.__name__)

    def _update_chartimage(self, draw, minimum_value, maximum_value):
        """
        Render the recorded history into *draw*, oldest value left, newest right.

        :param draw: image draw instance to render into
        :param minimum_value: lower bound of the displayed value range
        :param maximum_value: upper bound of the displayed value range
        """
        value_range = maximum_value - minimum_value
        x = self._x1
        last_x = x
        last_y = None
        with self._history_service.history_lock:
            # fix: log messages referred to "CircularChart" although this is SequentialChart
            self._logger.info("SequentialChart._update_chartimage() - acquired _history_lock")
            for entry in self._history_service.history:
                value = entry["value"]
                if value is not None:
                    # clamp to the displayable range, then normalize to chart height
                    value = min(max(value, minimum_value), maximum_value)
                    int_value = value - minimum_value
                    try:
                        norm_value = int_value / value_range
                    except ZeroDivisionError:
                        norm_value = 0
                    y = self._y2 - int(norm_value * self._chart_height)
                    if self._chart_connect_values:
                        if last_y is None:
                            last_y = y
                        draw.line((last_x, last_y, x, y), fill=self._foreground_color, width=1)
                        last_x, last_y = x, y
                    else:
                        draw.line((x, y, x, y), fill=self._foreground_color, width=1)
                    self._logger.debug(
                        "SequentialChart.updateImage() - draw history. value:{}, dot:@({}/{})".format(value, x, y))
                else:
                    self._logger.debug("SequentialChart.updateImage() - draw history. skipping empty entry (x={})".
                                       format(x))
                x += self._chart_pixel_per_value
            # fix: this message wrongly claimed the lock was being acquired a second time
            self._logger.info("SequentialChart._update_chartimage() - releasing _history_lock")
/OBP_security_pillar_2-0.0.4.tar.gz/OBP_security_pillar_2-0.0.4/OBP_security_pillar_2/lambda_inside_vpc.py | import logging
from botocore.exceptions import ClientError
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# returns the list of lambda functions
def list_lambda_functions(client) -> list:
    """
    Return the complete list of Lambda functions, following pagination markers.

    :param client: boto3 Lambda client
    :return: list of function description dicts
    """
    # (fixed: the return annotation previously said ``dict`` although a list
    # is returned, and the docstring documented non-existent parameters)
    logger.info(" ---Inside lambdafn.utils :: list_lambda_functions")
    function_lst = []
    marker = ''
    while True:
        if marker == '' or marker is None:
            response = client.list_functions()
        else:
            response = client.list_functions(
                Marker=marker
            )
        function_lst.extend(response['Functions'])
        # NextMarker is absent or empty on the last page
        marker = response.get('NextMarker')
        if not marker:
            break
    return function_lst
# checks the compliance for lambda-inside-vpc
def lambda_inside_vpc(self) -> dict:
    """
    Check every Lambda function in every region for a VPC configuration.

    :param self: object carrying a boto3 ``session``
    :return: compliance result dictionary
    """
    logger.info(" ---Inside lambda :: lambda_inside_vpc()")
    result = True
    failReason = ''
    offenders = []
    control_id = 'Id3.72'
    # NOTE(review): compliance_type/description talk about a dead-letter-queue
    # check, but the code below verifies VPC configuration (VpcConfig/VpcId).
    # Confirm which wording downstream consumers of this report expect.
    compliance_type = "Lambda DLQ check"
    description = "Checks whether an AWS Lambda function is configured with a dead-letter queue."
    resource_type = "AWS Lambda"
    risk_level = 'Medium'
    regions = self.session.get_available_regions('lambda')
    for region in regions:
        try:
            client = self.session.client('lambda', region_name=region)
            function_lst = list_lambda_functions(client)
            for function in function_lst:
                try:
                    # functions without a VPC config raise KeyError here
                    vpc_id = function['VpcConfig']['VpcId']
                except KeyError:
                    result = False
                    offenders.append(function['FunctionName'])
                    failReason = 'Lambda function is not VPC enabled'
        except ClientError as e:
            # region not enabled for this account, missing permissions, etc.
            logger.error("Something went wrong with the region {}: {}".format(region, e))
    return {
        'Result': result,
        'failReason': failReason,
        'resource_type': resource_type,
        'Offenders': offenders,
        'Compliance_type': compliance_type,
        'Description': description,
        'Risk Level': risk_level,
        'ControlId': control_id
    }
/ModelSEEDpy-freiburgermsu-0.3.1.4.tar.gz/ModelSEEDpy-freiburgermsu-0.3.1.4/modelseedpy_freiburgermsu/core/rpcclient.py |
from __future__ import absolute_import
import json as _json
import requests as _requests
import random as _random
class _JSONObjectEncoder(_json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return _json.JSONEncoder.default(self, obj)
class ServerError(Exception):
    """Error reported by the remote RPC service.

    ``data`` carries JSON-RPC 2.0 error details; ``error`` is the 1.1
    equivalent — whichever is supplied ends up in ``self.data``.
    """

    def __init__(self, name, code, message=None, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message if message else ""
        self.data = data or error or ""

    def __str__(self):
        return "".join(
            [self.name, ": ", str(self.code), ". ", self.message, "\n", self.data]
        )
class RPCClient:
    """Simple JSON-RPC-over-HTTP client."""

    def __init__(
        self,
        url,
        token=None,
        version="1.0",
        timeout=30 * 60,
        trust_all_ssl_certificates=False,
    ):
        # url: service endpoint; token: default AUTHORIZATION header value;
        # version: protocol version string sent with each call;
        # timeout: request timeout in seconds (default 30 minutes);
        # trust_all_ssl_certificates: when True, disables SSL verification.
        self.url = url
        self.token = token
        self.version = version
        self.timeout = timeout
        self.trust_all_ssl_certificates = trust_all_ssl_certificates

    def call(self, method, params, token=None):
        """Invoke *method* with *params*; return the RPC result or raise ServerError."""
        headers = {}
        # A per-call token overrides the client-level default.
        if token:
            headers["AUTHORIZATION"] = token
        elif self.token:
            headers["AUTHORIZATION"] = self.token
        arg_hash = {
            "method": method,
            "params": params,
            "version": self.version,
            # random request id (digits after "0."); one request per HTTP round trip
            "id": str(_random.random())[2:],
            "context": {},
        }
        body = _json.dumps(arg_hash, cls=_JSONObjectEncoder)
        ret = _requests.post(
            self.url,
            data=body,
            headers=headers,
            timeout=self.timeout,
            verify=not self.trust_all_ssl_certificates,
        )
        ret.encoding = "utf-8"
        if ret.status_code == 500:
            # RPC-level failures arrive as HTTP 500, ideally with a JSON error body.
            if ret.headers.get("content-type") == "application/json":
                err = ret.json()
                if "error" in err:
                    raise ServerError(**err["error"])
                else:
                    raise ServerError("Unknown", 0, ret.text)
            else:
                raise ServerError("Unknown", 0, ret.text)
        if not ret.ok:
            # other HTTP errors surface as requests.HTTPError
            ret.raise_for_status()
        resp = ret.json()
        if "result" not in resp:
            raise ServerError("Unknown", 0, "An unknown server error occurred")
        if not resp["result"]:
            return None
        return resp["result"]
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/geonode/js/ol-2.13/lib/OpenLayers/Control/KeyboardDefaults.js | * @requires OpenLayers/Control.js
* @requires OpenLayers/Handler/Keyboard.js
* @requires OpenLayers/Events.js
*/
/**
* Class: OpenLayers.Control.KeyboardDefaults
* The KeyboardDefaults control adds panning and zooming functions, controlled
* with the keyboard. By default arrow keys pan, +/- keys zoom & Page Up/Page
* Down/Home/End scroll by three quarters of a page.
*
* This control has no visible appearance.
*
* Inherits from:
* - <OpenLayers.Control>
*/
OpenLayers.Control.KeyboardDefaults = OpenLayers.Class(OpenLayers.Control, {

    /**
     * APIProperty: autoActivate
     * {Boolean} Activate the control when it is added to a map. Default is
     *     true.
     */
    autoActivate: true,

    /**
     * APIProperty: slideFactor
     * Pixels to slide by.
     */
    slideFactor: 75,

    /**
     * APIProperty: observeElement
     * {DOMelement|String} The DOM element to handle keys for. You
     *     can use the map div here, to have the navigation keys
     *     work when the map div has the focus. If undefined the
     *     document is used.
     */
    observeElement: null,

    /**
     * Constructor: OpenLayers.Control.KeyboardDefaults
     */

    /**
     * Method: draw
     * Create handler.
     */
    draw: function() {
        // Fall back to the whole document when no element was configured.
        var observeElement = this.observeElement || document;
        this.handler = new OpenLayers.Handler.Keyboard( this,
                                {"keydown": this.defaultKeyPress},
                                {observeElement: observeElement}
                                );
    },

    /**
     * Method: defaultKeyPress
     * When handling the key event, we only use evt.keyCode. This holds
     * some drawbacks, though we get around them below. When interpretting
     * the keycodes below (including the comments associated with them),
     * consult the URL below. For instance, the Safari browser returns
     * "IE keycodes", and so is supported by any keycode labeled "IE".
     *
     * Very informative URL:
     *    http://unixpapa.com/js/key.html
     *
     * Parameters:
     * evt - {Event}
     */
    defaultKeyPress: function (evt) {
        var size, handled = true;

        // Don't hijack keystrokes aimed at form fields.
        var target = OpenLayers.Event.element(evt);
        if (target &&
            (target.tagName == 'INPUT' ||
             target.tagName == 'TEXTAREA' ||
             target.tagName == 'SELECT')) {
            return;
        }

        switch (evt.keyCode) {
            case OpenLayers.Event.KEY_LEFT:
                this.map.pan(-this.slideFactor, 0);
                break;
            case OpenLayers.Event.KEY_RIGHT:
                this.map.pan(this.slideFactor, 0);
                break;
            case OpenLayers.Event.KEY_UP:
                this.map.pan(0, -this.slideFactor);
                break;
            case OpenLayers.Event.KEY_DOWN:
                this.map.pan(0, this.slideFactor);
                break;

            // Page/Home/End keys scroll by three quarters of the view.
            case 33: // Page Up. Same in all browsers.
                size = this.map.getSize();
                this.map.pan(0, -0.75*size.h);
                break;
            case 34: // Page Down. Same in all browsers.
                size = this.map.getSize();
                this.map.pan(0, 0.75*size.h);
                break;
            case 35: // End. Same in all browsers.
                size = this.map.getSize();
                this.map.pan(0.75*size.w, 0);
                break;
            case 36: // Home. Same in all browsers.
                size = this.map.getSize();
                this.map.pan(-0.75*size.w, 0);
                break;

            case 43:  // +/= (ASCII), keypad + (ASCII, Opera)
            case 61:  // +/= (Mozilla, Opera, some ASCII)
            case 187: // +/= (IE)
            case 107: // keypad + (IE, Mozilla)
                this.map.zoomIn();
                break;
            case 45:  // -/_ (ASCII, Opera), keypad - (ASCII, Opera)
            case 109: // -/_ (Mozilla), keypad - (Mozilla, IE)
            case 189: // -/_ (IE)
            case 95:  // -/_ (some ASCII)
                this.map.zoomOut();
                break;
            default:
                handled = false;
        }
        if (handled) {
            // prevent browser default not to move the page
            // when moving the page with the keyboard
            OpenLayers.Event.stop(evt);
        }
    },

    CLASS_NAME: "OpenLayers.Control.KeyboardDefaults"
});
/Bottleneck-1.3.7rc1-cp36-cp36m-macosx_10_9_x86_64.whl/bottleneck/slow/nonreduce_axis.py | import numpy as np
from numpy import partition, argpartition
__all__ = ["rankdata", "nanrankdata", "partition", "argpartition", "push"]
def rankdata(a, axis=None):
"Slow rankdata function used for unaccelerated dtypes."
return _rank(scipy_rankdata, a, axis)
def nanrankdata(a, axis=None):
"Slow nanrankdata function used for unaccelerated dtypes."
return _rank(_nanrankdata_1d, a, axis)
def _rank(func1d, a, axis):
a = np.array(a, copy=False)
if axis is None:
a = a.ravel()
axis = 0
if a.size == 0:
y = a.astype(np.float64, copy=True)
else:
y = np.apply_along_axis(func1d, axis, a)
if a.dtype != np.float64:
y = y.astype(np.float64)
return y
def _nanrankdata_1d(a):
y = np.empty(a.shape, dtype=np.float64)
y.fill(np.nan)
idx = ~np.isnan(a)
y[idx] = scipy_rankdata(a[idx])
return y
def push(a, n=None, axis=-1):
    """Slow push used for unaccelerated dtypes.

    Fill NaN values by carrying the most recent non-NaN value forward along
    *axis*, but no further than *n* positions past where it was observed.

    :param a: array-like input.
    :param n: maximum carry distance; None means unlimited.
    :param axis: axis to push along (default: last axis).
    :return: array with NaNs (partially) filled forward.
    """
    if n is None:
        n = np.inf
    y = np.array(a)
    ndim = y.ndim
    # Work along the last axis; roll the target axis into place first.
    # Fixed: the condition used ``or`` (always true), which attempted a
    # rollaxis even on 0-d input and crashed; ``and`` is the intended test.
    if axis != -1 and axis != ndim - 1:
        y = np.rollaxis(y, axis, ndim)
    if ndim == 1:
        y = y[None, :]
    elif ndim == 0:
        return y
    fidx = ~np.isnan(y)
    recent = np.empty(y.shape[:-1])
    count = np.empty(y.shape[:-1])
    recent.fill(np.nan)
    count.fill(np.nan)
    with np.errstate(invalid="ignore"):
        for i in range(y.shape[-1]):
            # expire carried values that are older than n positions
            idx = (i - count) > n
            recent[idx] = np.nan
            # fill NaN slots from the carried values
            idx = ~fidx[..., i]
            y[idx, i] = recent[idx]
            # record freshly observed values and their position
            idx = fidx[..., i]
            count[idx] = i
            recent[idx] = y[idx, i]
    if axis != -1 and axis != ndim - 1:
        y = np.rollaxis(y, ndim - 1, axis)
    if ndim == 1:
        return y[0]
    return y
# ---------------------------------------------------------------------------
#
# SciPy
#
# Local copy of SciPy's rankdata to avoid a SciPy dependency. The SciPy
# license is included in the Bottleneck license file, which is distributed
# with Bottleneck.
#
# Code taken from scipy master branch on Aug 31, 2016.
def scipy_rankdata(a, method="average"):
    """
    rankdata(a, method='average')

    Assign ranks to data, dealing with ties appropriately.

    Ranks begin at 1. The `method` argument controls how ranks are assigned
    to equal values. See [1]_ for further discussion of ranking methods.

    Parameters
    ----------
    a : array_like
        The array of values to be ranked. The array is first flattened.
    method : str, optional
        The method used to assign ranks to tied elements.
        The options are 'average', 'min', 'max', 'dense' and 'ordinal'.

        'average':
            The average of the ranks that would have been assigned to
            all the tied values is assigned to each value.
        'min':
            The minimum of the ranks that would have been assigned to all
            the tied values is assigned to each value. (This is also
            referred to as "competition" ranking.)
        'max':
            The maximum of the ranks that would have been assigned to all
            the tied values is assigned to each value.
        'dense':
            Like 'min', but the rank of the next highest element is assigned
            the rank immediately after those assigned to the tied elements.
        'ordinal':
            All values are given a distinct rank, corresponding to the order
            that the values occur in `a`.

        The default is 'average'.

    Returns
    -------
    ranks : ndarray
         An array of length equal to the size of `a`, containing rank
         scores.

    References
    ----------
    .. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking

    Examples
    --------
    >>> from scipy.stats import rankdata
    >>> rankdata([0, 2, 3, 2])
    array([ 1. ,  2.5,  4. ,  2.5])
    >>> rankdata([0, 2, 3, 2], method='min')
    array([ 1,  2,  4,  2])
    >>> rankdata([0, 2, 3, 2], method='max')
    array([ 1,  3,  4,  3])
    >>> rankdata([0, 2, 3, 2], method='dense')
    array([ 1,  2,  3,  2])
    >>> rankdata([0, 2, 3, 2], method='ordinal')
    array([ 1,  2,  4,  3])
    """
    # NOTE: kept as a verbatim local copy of SciPy's rankdata (see the
    # comment block above); only comments have been added here.
    if method not in ("average", "min", "max", "dense", "ordinal"):
        raise ValueError('unknown method "{0}"'.format(method))
    a = np.ravel(np.asarray(a))
    # mergesort is stable, which 'ordinal' needs to break ties by position
    algo = "mergesort" if method == "ordinal" else "quicksort"
    sorter = np.argsort(a, kind=algo)

    # inv maps each original element to its position in the sorted order
    inv = np.empty(sorter.size, dtype=np.intp)
    inv[sorter] = np.arange(sorter.size, dtype=np.intp)

    if method == "ordinal":
        return inv + 1

    a = a[sorter]
    # obs marks the first occurrence of each distinct value in sorted order
    obs = np.r_[True, a[1:] != a[:-1]]
    dense = obs.cumsum()[inv]

    if method == "dense":
        return dense

    # cumulative counts of each unique value
    count = np.r_[np.nonzero(obs)[0], len(obs)]

    if method == "max":
        return count[dense]

    if method == "min":
        return count[dense - 1] + 1

    # average method
    return 0.5 * (count[dense] + count[dense - 1] + 1)
/Helios_Scanner-1.1-py3-none-any.whl/helios/ext/libcms/detector.py | import requests
# because I am too lazy to replace all the if statements
class InvalidRequestObject:
    """Stand-in for a failed HTTP request.

    Mimics the subset of the ``requests.Response`` interface the scanner
    actually reads (``status_code`` and ``text``), so a network failure can
    be handled exactly like a plain 404 page.
    """
    status_code = 404
    text = ""
class CMSDetector:
    """Best-effort CMS fingerprinting over HTTP.

    Probes a target site for tell-tale files and markers of WordPress,
    Joomla, Magento and Drupal, and returns the name of the first CMS
    detected (or ``None`` when nothing matched).
    """

    # Class-level defaults kept for backward compatibility with any code
    # reading CMSDetector.headers / CMSDetector.cookies directly; instances
    # always get their own copies in __init__.
    headers = {}
    cookies = {}

    def __init__(self, user_agent=None, headers=None, cookies=None):
        """Create a detector.

        :param user_agent: optional ``User-Agent`` header value
        :param headers: optional extra HTTP headers (copied, never aliased)
        :param cookies: optional cookies sent with every request (copied)
        """
        # Bug fix: the original used mutable default arguments
        # (headers={}, cookies={}), so setting a User-Agent on one instance
        # mutated the shared default and leaked into every instance created
        # afterwards.  Copying also protects the caller's dict.
        self.headers = dict(headers) if headers else {}
        self.cookies = dict(cookies) if cookies else {}
        if user_agent:
            self.headers['User-Agent'] = user_agent

    def get(self, url):
        """GET *url* without following redirects.

        On any request failure an :class:`InvalidRequestObject` stub is
        returned, so callers can keep reading ``status_code`` / ``text``
        without None checks.
        """
        try:
            return requests.get(url, allow_redirects=False, headers=self.headers, cookies=self.cookies)
        except Exception:
            # Network / TLS / timeout errors are all treated like a 404.
            return InvalidRequestObject()

    def scan_sub(self, base, path):
        """Scan the sub-directory *path* of *base* if it appears to exist.

        Returns the CMS name detected under that sub-directory, or ``None``.
        """
        if base.endswith('/'):
            base = base[:-1]
        url = "%s/%s/" % (base, path)
        result = self.get(url)
        # Anything that is not an explicit 404 / "not found" page counts as
        # existing (2xx, redirects, auth challenges, ...).
        if result.status_code != 404 and "not found" not in result.text.lower():
            return self.scan(url)
        return None

    def scan(self, base):
        """Probe *base* and return 'wordpress', 'joomla', 'magento', 'drupal' or ``None``."""
        if base.endswith('/'):
            base = base[:-1]
        ####################################################
        # WordPress Scans
        ####################################################
        wpLoginCheck = self.get(base + '/wp-login.php')
        if wpLoginCheck.status_code == 200 and "user_login" in wpLoginCheck.text and "404" not in wpLoginCheck.text:
            return "wordpress"
        wpAdminCheck = self.get(base + '/wp-admin')
        # Bug fix: the original tested wpLoginCheck.text in the "404" clause
        # here instead of the response actually being inspected.
        if wpAdminCheck.status_code == 200 and "user_login" in wpAdminCheck.text and "404" not in wpAdminCheck.text:
            return "wordpress"
        wpAdminUpgradeCheck = self.get(base + '/wp-admin/upgrade.php')
        if wpAdminUpgradeCheck.status_code == 200 and "404" not in wpAdminUpgradeCheck.text:
            return "wordpress"
        wpAdminReadMeCheck = self.get(base + '/readme.html')
        if wpAdminReadMeCheck.status_code == 200 and "404" not in wpAdminReadMeCheck.text:
            return "wordpress"
        ####################################################
        # Joomla Scans
        ####################################################
        joomlaAdminCheck = self.get(base + '/administrator/')
        if joomlaAdminCheck.status_code == 200 and "mod-login-username" in joomlaAdminCheck.text and "404" not in joomlaAdminCheck.text:
            return "joomla"
        joomlaReadMeCheck = self.get(base + '/readme.txt')
        if joomlaReadMeCheck.status_code == 200 and "joomla" in joomlaReadMeCheck.text and "404" not in joomlaReadMeCheck.text:
            return "joomla"
        joomlaTagCheck = self.get(base)
        if joomlaTagCheck.status_code == 200 and 'name="generator" content="Joomla' in joomlaTagCheck.text and "404" not in joomlaTagCheck.text:
            return "joomla"
        joomlaStringCheck = self.get(base)
        if joomlaStringCheck.status_code == 200 and "joomla" in joomlaStringCheck.text and "404" not in joomlaStringCheck.text:
            return "joomla"
        joomlaDirCheck = self.get(base + '/media/com_joomlaupdate/')
        # A 403 on this directory means it exists but listing is forbidden.
        if joomlaDirCheck.status_code == 403 and "404" not in joomlaDirCheck.text:
            return "joomla"
        ####################################################
        # Magento Scans
        ####################################################
        magentoAdminCheck = self.get(base + '/index.php/admin/')
        if magentoAdminCheck.status_code == 200 and 'login' in magentoAdminCheck.text and 'magento' in magentoAdminCheck.text and "404" not in magentoAdminCheck.text:
            return "magento"
        magentoRelNotesCheck = self.get(base + '/RELEASE_NOTES.txt')
        if magentoRelNotesCheck.status_code == 200 and 'magento' in magentoRelNotesCheck.text:
            return "magento"
        magentoCookieCheck = self.get(base + '/js/mage/cookies.js')
        if magentoCookieCheck.status_code == 200 and "404" not in magentoCookieCheck.text:
            return "magento"
        magStringCheck = self.get(base + '/index.php')
        # Bug fix: parenthesize the `or` -- with the original precedence the
        # 'magento' substring test bypassed the status-code check entirely.
        if magStringCheck.status_code == 200 and ('/mage/' in magStringCheck.text or 'magento' in magStringCheck.text):
            return "magento"
        magentoStylesCSSCheck = self.get(base + '/skin/frontend/default/default/css/styles.css')
        if magentoStylesCSSCheck.status_code == 200 and "404" not in magentoStylesCSSCheck.text:
            return "magento"
        mag404Check = self.get(base + '/errors/design.xml')
        if mag404Check.status_code == 200 and "magento" in mag404Check.text:
            return "magento"
        ####################################################
        # Drupal Scans
        ####################################################
        drupalReadMeCheck = self.get(base + '/readme.txt')
        if drupalReadMeCheck.status_code == 200 and 'drupal' in drupalReadMeCheck.text and '404' not in drupalReadMeCheck.text:
            return "drupal"
        drupalTagCheck = self.get(base)
        if drupalTagCheck.status_code == 200 and 'name="Generator" content="Drupal' in drupalTagCheck.text:
            return "drupal"
        drupalCopyrightCheck = self.get(base + '/core/COPYRIGHT.txt')
        if drupalCopyrightCheck.status_code == 200 and 'Drupal' in drupalCopyrightCheck.text and '404' not in drupalCopyrightCheck.text:
            return "drupal"
        drupalReadme2Check = self.get(base + '/modules/README.txt')
        if drupalReadme2Check.status_code == 200 and 'drupal' in drupalReadme2Check.text and '404' not in drupalReadme2Check.text:
            return "drupal"
        drupalStringCheck = self.get(base)
        if drupalStringCheck.status_code == 200 and 'drupal' in drupalStringCheck.text:
            return "drupal"
        return None
/FanFicFare-4.27.0.tar.gz/FanFicFare-4.27.0/fanficfare/adapters/adapter_chosentwofanficcom.py |
# Copyright 2012 Fanficdownloader team, 2018 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import re
from bs4.element import Comment
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
# py2 vs py3 transition
from ..six import text_type as unicode
from .base_adapter import BaseSiteAdapter, makeDate
def getClass():
    """Entry point used by the adapter registry: return this module's adapter class."""
    return ChosenTwoFanFicArchiveAdapter
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class ChosenTwoFanFicArchiveAdapter(BaseSiteAdapter):
    """Download adapter for chosentwofanfic.com, an eFiction-based archive.

    Handles metadata extraction, chapter listing and chapter download for
    stories served through the standard eFiction ``viewstory.php`` pages.
    """

    def __init__(self, config, url):
        BaseSiteAdapter.__init__(self, config, url)
        self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
        self.password = ""
        self.is_adult=False
        # get storyId from url--url validation guarantees query is only sid=1234
        self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
        # normalized story URL.
        self._setURL('https://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId'))
        # Each adapter needs to have a unique site abbreviation.
        self.story.setMetadata('siteabbrev','chosen2')
        # The date format will vary from site to site.
        # http://docs.python.org/library/datetime.html#strftime-strptime-behavior
        self.dateformat = "%m/%d/%Y"

    @staticmethod # must be @staticmethod, don't remove it.
    def getSiteDomain():
        """Return the site's domain name (this site does not use www)."""
        # The site domain. Does have www here, if it uses it.
        return 'chosentwofanfic.com'

    @classmethod
    def getSiteExampleURLs(cls):
        """Return an example story URL shown to users for URL-format help."""
        return "https://"+cls.getSiteDomain()+"/viewstory.php?sid=1234"

    def getSiteURLPattern(self):
        """Return the regex that accepted story URLs must match (sid query only)."""
        return r"https?"+re.escape("://"+self.getSiteDomain()+"/viewstory.php?sid=")+r"\d+$"

    ## Getting the chapter list and the meta data, plus 'is adult' checking.
    def extractChapterUrlsAndMetadata(self):
        """Fetch the story index page and populate metadata and the chapter list.

        Raises
        ------
        exceptions.AdultCheckRequired
            If the site demands an age confirmation and is_adult is not set.
        exceptions.AccessDenied
            If the story has not been validated by the site administrators.
        """
        # checking to see if the is_adult is set to true
        if self.is_adult or self.getConfig("is_adult"):
            addURL = "&ageconsent=ok&warning=3"
        else:
            addURL = ""
        # index=1 makes sure we see the story chapter index. Some
        # sites skip that for one-chapter stories.
        url = '{0}&index=1{1}'.format(self.url,addURL)
        logger.debug("URL: "+url)
        data = self.get_request(url)
        if "Content is only suitable for mature adults. May contain explicit language and adult themes. Equivalent of NC-17." in data:
            raise exceptions.AdultCheckRequired(self.url)
        if "Access denied. This story has not been validated by the adminstrators of this site." in data:
            raise exceptions.AccessDenied("{0} says: Access denied. This story has not been validated by the adminstrators of this site.".format(self.getSiteDomain()))
        soup = self.make_soup(data)
        ## Title
        ## Some stories have a banner that has it's own a tag before the actual text title...
        ## so I'm checking the pagetitle div for all a tags that match the criteria, then taking the last.
        a = soup.find('div',{'id':'pagetitle'}).findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))[-1]
        self.story.setMetadata('title',stripHTML(a))
        # Find authorid and URL from... author url.
        # This site lists the newest member to the site before the div that has the story info
        # so I'm checking the pagetitle div for this as well
        a = soup.find('div',{'id':'pagetitle'}).find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
        self.story.setMetadata('authorId',a['href'].split('=')[1])
        self.story.setMetadata('authorUrl','https://'+self.host+'/'+a['href'])
        self.story.setMetadata('author',a.string)
        # Find the chapters:
        for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+r"&chapter=\d+$")):
            # just in case there's tags, like <i> in chapter titles.
            #self.add_chapter(chapter,'http://'+self.host+'/'+chapter['href'])
            # The age-consent query suffix must be carried onto every chapter URL.
            self.add_chapter(chapter,'https://{0}/{1}{2}'.format(self.host, chapter['href'],addURL))
        # eFiction sites don't help us out a lot with their meta data
        # formating, so it's a little ugly.
        # utility method
        def defaultGetattr(d,k):
            # Return d[k], or "" if the tag has no such attribute.
            try:
                return d[k]
            except:
                return ""
        # <span class="label">Rated:</span> NC-17<br /> etc
        # Walk each "label" span and accumulate the raw HTML between it and
        # the next label span; that text is the label's value.
        labels = soup.findAll('span',{'class':'label'})
        for labelspan in labels:
            val = labelspan.nextSibling
            value = unicode('')
            while val and not 'label' in defaultGetattr(val,'class'):
                # print("val:%s"%val)
                if not isinstance(val,Comment):
                    value += unicode(val)
                val = val.nextSibling
            label = labelspan.string
            # print("label:%s\nvalue:%s"%(label,value))
            if 'Summary' in label:
                self.setDescription(url,value)
            if 'Rated' in label:
                self.story.setMetadata('rating', stripHTML(value))
            if 'Word count' in label:
                self.story.setMetadata('numWords', stripHTML(value))
            if 'Categories' in label:
                cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
                for cat in cats:
                    self.story.addToList('category',cat.string)
            if 'Characters' in label:
                chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
                for char in chars:
                    self.story.addToList('characters',char.string)
            # NOTE(review): the type_id values below (1=genre, 4=pairing,
            # 2=warnings) are assumed from this site's eFiction configuration
            # -- confirm against the live site if parsing breaks.
            if 'Genre' in label:
                genres = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=1')) # XXX
                for genre in genres:
                    self.story.addToList('genre',genre.string)
            if 'Pairing' in label:
                ships = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=4'))
                for ship in ships:
                    self.story.addToList('ships',ship.string)
            if 'Warnings' in label:
                warnings = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=2')) # XXX
                for warning in warnings:
                    self.story.addToList('warnings',warning.string)
            if 'Completed' in label:
                if 'Yes' in stripHTML(value):
                    self.story.setMetadata('status', 'Completed')
                else:
                    self.story.setMetadata('status', 'In-Progress')
            if 'Published' in label:
                self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat))
            if 'Updated' in label:
                self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
            if 'Disclaimer' in label:
                self.story.setMetadata('disclaimer', stripHTML(value))
        try:
            # Find Series name from series URL.
            a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
            series_name = a.string
            series_url = 'https://'+self.host+'/'+a['href']
            seriessoup = self.make_soup(self.get_request(series_url))
            # can't use ^viewstory...$ in case of higher rated stories with javascript href.
            storyas = seriessoup.findAll('a', href=re.compile(r'viewstory.php\?sid=\d+'))
            # Count this story's 1-based position within the series listing.
            i=1
            for a in storyas:
                # this site has several links to each story.
                if a.text == 'Latest Chapter':
                    if ('viewstory.php?sid='+self.story.getMetadata('storyId')) in a['href']:
                        self.setSeries(series_name, i)
                        self.story.setMetadata('seriesUrl',series_url)
                        break
                    i+=1
        except:
            # I find it hard to care if the series parsing fails
            pass

    # grab the text for an individual chapter.
    def getChapterText(self, url):
        """Download one chapter page and return its story <div> for inclusion.

        Raises exceptions.FailedToDownload when the expected story element
        is missing from the fetched page.
        """
        logger.debug('Getting chapter text from: %s' % url)
        soup = self.make_soup(self.get_request(url))
        div = soup.find('div', {'id' : 'story'})
        if None == div:
            raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
        return self.utf8FromSoup(url,div)
/Grid2Op-1.9.3-py3-none-any.whl/grid2op/Backend/backend.py |
import copy
import os
import sys
import warnings
import json
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Exceptions import (
EnvError,
DivergingPowerFlow,
IncorrectNumberOfElements,
IncorrectNumberOfLoads,
)
from grid2op.Exceptions import (
IncorrectNumberOfGenerators,
BackendError,
IncorrectNumberOfLines,
)
from grid2op.Space import GridObjects
from grid2op.Exceptions import Grid2OpException
# TODO method to get V and theta at each bus, could be in the same shape as check_kirchoff
class Backend(GridObjects, ABC):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Unless if you want to code yourself a backend this is not recommend to alter it
or use it directly in any way.
If you want to code a backend, an example is given in :class:`PandaPowerBackend` (
or in the repository lightsim2grid on github)
This documentation is present mainly for exhaustivity. It is not recommended to manipulate a Backend
directly. Prefer using an :class:`grid2op.Environment.Environment`
This is a base class for each :class:`Backend` object.
It allows to run power flow smoothly, and abstract the method of computing cascading failures.
This class allow the user or the agent to interact with an power flow calculator, while relying on dedicated
methods to change the power grid behaviour.
It is NOT recommended to use this class outside the Environment.
An example of a valid backend is provided in the :class:`PandaPowerBackend`.
All the abstract methods (that need to be implemented for a backend to work properly) are (more information given
in the :ref:`create-backend-module` page):
- :func:`Backend.load_grid`
- :func:`Backend.apply_action`
- :func:`Backend.runpf`
- :func:`Backend.get_topo_vect`
- :func:`Backend.generators_info`
- :func:`Backend.loads_info`
- :func:`Backend.lines_or_info`
- :func:`Backend.lines_ex_info`
And optionally:
- :func:`Backend.close` (this is mandatory if your backend implementation (`self._grid`) is relying on some
c / c++ code that do not free memory automatically.
- :func:`Backend.copy` (not that this is mandatory if your backend implementation (in `self._grid`) cannot be
deep copied using the python copy.deepcopy function) [as of grid2op >= 1.7.1 it is no more
required. If not implemented, you won't be able to use some of grid2op feature however]
- :func:`Backend.get_line_status`: the default implementation uses the "get_topo_vect()" and then check
if buses at both ends of powerline are positive. This is rather slow and can most likely be optimized.
- :func:`Backend.get_line_flow`: the default implementation will retrieve all powerline information
at the "origin" side and just return the "a_or" vector. You want to do something smarter here.
- :func:`Backend._disconnect_line`: has a default slow implementation using "apply_action" that might
can most likely be optimized in your backend.
- :func:`Backend.reset` will reload the powergrid from the hard drive by default. This is rather slow and we
recommend to overload it.
And, if the flag :attr:Backend.shunts_data_available` is set to ``True`` the method :func:`Backend.shunt_info`
should also be implemented.
.. note:: Backend also support "shunts" information if the `self.shunts_data_available` flag is set to
``True`` in that case, you also need to implement all the relevant shunt information (attributes `n_shunt`,
`shunt_to_subid`, `name_shunt` and function `shunt_info` and handle the modification of shunts
bus, active value and reactive value in the "apply_action" function).
In order to be valid and carry out some computations, you should call :func:`Backend.load_grid` and later
:func:`grid2op.Spaces.GridObjects.assert_grid_correct`. It is also more than recommended to call
:func:`Backend.assert_grid_correct_after_powerflow` after the first powerflow. This is all carried ou in the
environment properly.
Attributes
----------
detailed_infos_for_cascading_failures: :class:`bool`
Whether to be verbose when computing a cascading failure.
thermal_limit_a: :class:`numpy.array`, dtype:float
Thermal limit of the powerline in amps for each powerline. Thie thermal limit is relevant on only one
side of the powerline: the same side returned by :func:`Backend.get_line_overflow`
comp_time: ``float``
Time to compute the powerflow (might be unset, ie stay at 0.0)
"""
IS_BK_CONVERTER = False
env_name = "unknown"
# action to set me
my_bk_act_class = None
_complete_action_class = None
ERR_INIT_POWERFLOW = "Power cannot be computed on the first time step, please check your data."
def __init__(self,
             detailed_infos_for_cascading_failures: bool=False,
             can_be_copied: bool=True,
             **kwargs):
    """
    Initialize an instance of Backend. This does nothing per se: only the call to
    :func:`Backend.load_grid` guarantees the backend is properly configured.

    :param detailed_infos_for_cascading_failures: Whether to be detailed (but slow)
        when computing cascading failures
    :type detailed_infos_for_cascading_failures: :class:`bool`
    """
    GridObjects.__init__(self)
    # Verbosity flag for cascading-failure computation: when True, every
    # intermediate grid state is returned, which can slow things down.
    self.detailed_infos_for_cascading_failures = detailed_infos_for_cascading_failures
    # The power grid object manipulated by this backend (one grid per backend).
    self._grid = None
    # Thermal limits in ampere, on the same "side" of the powerline as
    # self.get_line_overflow.
    self.thermal_limit_a = None
    # Nominal voltage at the bus each shunt is connected to (only when shunts
    # are supported); without it "get_action_to_set" might misbehave.
    self._sh_vnkv = None
    self.comp_time = 0.0
    self.can_output_theta = False
    # Guards against reusing the same backend instance in two environments.
    self._is_loaded = False
    self._can_be_copied = can_be_copied
    # Remember the constructor arguments so exact copies can be rebuilt;
    # extra kwargs may override the two explicit entries, as in the original.
    self._my_kwargs = {
        "detailed_infos_for_cascading_failures": detailed_infos_for_cascading_failures,
        "can_be_copied": self._can_be_copied,
    }
    self._my_kwargs.update(kwargs)
@property
def is_loaded(self):
    """Whether this backend instance has already been attached to an environment."""
    return self._is_loaded
@is_loaded.setter
def is_loaded(self, value):
    """Mark the backend as loaded; the flag is one-way and cannot be unset."""
    if value is not True:
        raise BackendError('Impossible to unset the "is_loaded" status.')
    self._is_loaded = True
@abstractmethod
def load_grid(self, path, filename=None):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        This is called once at the loading of the powergrid.

    Load the powergrid.  A concrete backend must first create ``self._grid``
    here, then fill every descriptive attribute of
    :class:`grid2op.Space.GridObjects` (see that class for which attributes
    must -- and must not -- be set at this point).

    :param path: the path to find the powergrid
    :type path: :class:`string`
    :param filename: the filename of the powergrid
    :type filename: :class:`string`, optional
    :return: ``None``
    """
    pass
@abstractmethod
def apply_action(self, action):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Don't attempt to apply an action directly to a backend.

    Modify the powergrid according to *action*: mainly topology when the
    action comes from the agent, and also productions / loads when it comes
    from the environment.  One of the core functions to implement when
    coding a backend; see :func:`grid2op.BaseAction.BaseAction.__call__`
    and the BaseAction source for how actions are represented.

    :param action: the action to be implemented on the powergrid.
    :type action: :class:`grid2op.Action._BackendAction._BackendAction`
    :return: ``None``
    """
    pass
@abstractmethod
def runpf(self, is_dc=False):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        This is called by :func:`Backend.next_grid_state` (that computes some
        kind of cascading failures).

    Run a power flow on the underlying grid, either AC (``is_dc=False``) or
    DC (``is_dc=True``).  One of the core functions to implement when coding
    a backend.

    :param is_dc: is the powerflow run in DC or in AC
    :type is_dc: :class:`bool`
    :return: ``True`` if it has converged, ``False`` otherwise (in which case
        no flows can be inspected on the grid), together with an exception
        describing the divergence (or ``None`` if no details are available)
    :rtype: (:class:`bool`, `Exception`)
    """
    pass
@abstractmethod
def get_topo_vect(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Prefer using :attr:`grid2op.Observation.BaseObservation.topo_vect`

    Return the topology vector of ``self._grid``: one entry per grid element
    (productions, loads, line extremities) giving the bus, within its
    substation, that the element is connected to -- or -1 when disconnected.
    For example, if the first entry describes load 1 and ``res[0] == 2``,
    load 1 is on the second bus of its substation.

    The position of each element in the vector is given by the
    `*_pos_topo_vect` attributes (*eg.*
    :attr:`grid2op.Space.GridObjects.load_pos_topo_vect`).

    Returns
    --------
    res: ``numpy.ndarray`` dtype: ``int``
        An array saying to which bus each object is connected.
    """
    pass
@abstractmethod
def generators_info(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Prefer using :attr:`grid2op.Observation.BaseObservation.gen_p`,
        :attr:`grid2op.Observation.BaseObservation.gen_q` and
        :attr:`grid2op.Observation.BaseObservation.gen_v` instead.

    Retrieve, for each generator, the active / reactive production and the
    voltage magnitude of the bus it is connected to.

    .. note::
        The values returned here are the values AFTER the powerflow has been
        computed, not the target values.

    Returns
    -------
    prod_p ``numpy.ndarray``
        The active power production for each generator (in MW)
    prod_q ``numpy.ndarray``
        The reactive power production for each generator (in MVAr)
    prod_v ``numpy.ndarray``
        The voltage magnitude of the bus to which each generator is connected (in kV)
    """
    pass
@abstractmethod
def loads_info(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Prefer using :attr:`grid2op.Observation.BaseObservation.load_p`,
        :attr:`grid2op.Observation.BaseObservation.load_q` and
        :attr:`grid2op.Observation.BaseObservation.load_v` instead.

    Retrieve, for each load, the active / reactive consumption and the
    voltage magnitude of the bus it is connected to.

    .. note::
        The values returned here are the values AFTER the powerflow has been
        computed, not the target values.

    Returns
    -------
    load_p ``numpy.ndarray``
        The active power consumption for each load (in MW)
    load_q ``numpy.ndarray``
        The reactive power consumption for each load (in MVAr)
    load_v ``numpy.ndarray``
        The voltage magnitude of the bus to which each load is connected (in kV)
    """
    pass
@abstractmethod
def lines_or_info(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Prefer using :attr:`grid2op.Observation.BaseObservation.p_or`,
        :attr:`grid2op.Observation.BaseObservation.q_or`,
        :attr:`grid2op.Observation.BaseObservation.a_or` and,
        :attr:`grid2op.Observation.BaseObservation.v_or` instead

    Return the information extracted from the grid at the **origin** end of
    each powerline.  For the assumed ordering of powerlines in these
    vectors, see :func:`Backend.get_line_status`.

    Returns
    -------
    p_or ``numpy.ndarray``
        the origin active power flowing on the lines (in MW)
    q_or ``numpy.ndarray``
        the origin reactive power flowing on the lines (in MVAr)
    v_or ``numpy.ndarray``
        the voltage magnitude at the origin of each powerlines (in kV)
    a_or ``numpy.ndarray``
        the current flow at the origin of each powerlines (in A)
    """
    pass
@abstractmethod
def lines_ex_info(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Prefer using :attr:`grid2op.Observation.BaseObservation.p_ex`,
        :attr:`grid2op.Observation.BaseObservation.q_ex`,
        :attr:`grid2op.Observation.BaseObservation.a_ex` and,
        :attr:`grid2op.Observation.BaseObservation.v_ex` instead

    Return the information extracted from the grid at the **extremity** end
    of each powerline.  For the assumed ordering of powerlines in these
    vectors, see :func:`Backend.get_line_status`.

    Returns
    -------
    p_ex ``numpy.ndarray``
        the extremity active power flowing on the lines (in MW)
    q_ex ``numpy.ndarray``
        the extremity reactive power flowing on the lines (in MVAr)
    v_ex ``numpy.ndarray``
        the voltage magnitude at the extremity of each powerlines (in kV)
    a_ex ``numpy.ndarray``
        the current flow at the extremity of each powerlines (in A)
    """
    pass
def close(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        This is called by `env.close()`; do not attempt to use it otherwise.

    Called when the environment is over.  After this call the backend might
    not behave properly and should not be used again before another call to
    :func:`Backend.load_grid`.  The default implementation does nothing;
    backends whose ``self._grid`` relies on C/C++ code that does not free
    memory automatically must override it.
    """
    pass
def reset(self, grid_path, grid_filename=None):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        This is done in the `env.reset()` method and should not be performed
        otherwise.

    Reload the power grid.  For backwards compatibility this default
    implementation simply resets the timing counter and defers to
    :func:`Backend.load_grid`; subclasses are encouraged to override it with
    something faster.
    """
    self.comp_time = 0.0
    self.load_grid(grid_path, filename=grid_filename)
def copy(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    .. note::
        As of grid2op 1.7.1 it is not mandatory to implement this function
        when creating a backend.  When it is unavailable, grid2op
        automatically deactivates the forecast capability
        (:func:`grid2op.Observation.BaseObservation.simulate`,
        :class:`grid2op.simulator.Simulator`, ...).

    Perform a deep copy of the backend.  The grid is deliberately copied
    separately from the rest of the object so that error messages are more
    explicit when the grid itself is the part that fails to copy.

    :return: An instance of Backend equal to :attr:`self`, but deep copied.
    :rtype: :class:`Backend`
    :raises BackendError: if this backend was created with ``can_be_copied=False``
    """
    if not self._can_be_copied:
        raise BackendError("This backend cannot be copied.")
    start_grid = self._grid
    self._grid = None
    try:
        # Copy everything except the grid first...
        res = copy.deepcopy(self)
        res.__class__ = type(self)  # somehow deepcopy forget the init class... weird
        # ... then copy the grid itself.
        res._grid = copy.deepcopy(start_grid)
    finally:
        # Bug fix: always restore the grid, even when deepcopy raises;
        # previously a failed copy left this backend with _grid = None,
        # silently corrupting it.
        self._grid = start_grid
    res._is_loaded = False  # i can reload a copy of an environment
    return res
def save_file(self, full_path):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Optional debugging helper: save the current power grid, in a human
    readable format supported by the backend (the format is not modified by
    this wrapper), at *full_path* (path + file name + extension).  The
    default implementation does not support saving and always raises.

    :param full_path: where *self._grid* should be stored.
    :type full_path: :class:`string`
    :return: ``None``
    :raises RuntimeError: always, unless overridden by the concrete backend
    """
    raise RuntimeError("Class {} does not allow for saving file.".format(self))
def get_line_status(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Prefer using :attr:`grid2op.Observation.BaseObservation.line_status` instead

    Return the status of each powerline (connected: ``True`` /
    disconnected: ``False``).  The ordering of powerlines in this vector is
    fixed and shared by every other per-line getter of the backend
    (:func:`Backend.get_line_flow`, ...): if line "l1" is entry 42 here, its
    flow is entry 42 there too.

    This default implementation derives the status from
    :func:`Backend.get_topo_vect` (both ends on a valid bus); concrete
    backends can most likely do it faster.

    :return: an array with the line status of each powerline
    :rtype: np.array, dtype:bool
    """
    topo_vect = self.get_topo_vect()
    # A powerline counts as connected iff both of its ends sit on a
    # valid bus (bus id >= 0 in the topology vector).
    or_on_bus = topo_vect[self.line_or_pos_topo_vect] >= 0
    ex_on_bus = topo_vect[self.line_ex_pos_topo_vect] >= 0
    return or_on_bus & ex_on_bus
def get_line_flow(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Prefer using :attr:`grid2op.Observation.BaseObservation.a_or` or
        :attr:`grid2op.Observation.BaseObservation.a_ex` for example

    Return one current value per powerline.  In AC mode this should be the
    flow at the end of the line carrying the protection; since there is in
    general no loss of generality in assuming all protections sit on the
    "origin" end, this default implementation returns all origin flows.  A
    specific backend may instead return, e.g., the maximum over both ends.

    For the assumed ordering of powerlines, see
    :func:`Backend.get_line_status`.

    :return: an array with the line flows of each powerline
    :rtype: np.array, dtype:float
    """
    # Only the current (4th value) of the origin-side tuple is needed.
    _, _, _, a_or = self.lines_or_info()
    return a_or
def set_thermal_limit(self, limits):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
You can set the thermal limit directly in the environment.
This function is used as a convenience function to set the thermal limits :attr:`Backend.thermal_limit_a`
in amperes.
It can be used at the beginning of an episode if the thermal limit are not present in the original data files
or alternatively if the thermal limits depends on the period of the year (one in winter and one in summer
for example).
Parameters
----------
limits: ``object``
It can be understood differently according to its type:
- If it's a ``numpy.ndarray``, then it is assumed the thermal limits are given in amperes in the same order
as the powerlines computed in the backend. In that case it modifies all the thermal limits of all
the powerlines at once.
- If it's a ``dict`` it must have:
- as key the powerline names (not all names are mandatory, in that case only the powerlines with the name
in this dictionnary will be modified)
- as value the new thermal limit (should be a strictly positive float).
"""
if isinstance(limits, np.ndarray):
if limits.shape[0] == self.n_line:
self.thermal_limit_a = 1.0 * limits.astype(dt_float)
elif isinstance(limits, dict):
for el in limits.keys():
if not el in self.name_line:
raise BackendError(
'You asked to modify the thermal limit of powerline named "{}" that is not '
"on the grid. Names of powerlines are {}".format(
el, self.name_line
)
)
for i, el in self.name_line:
if el in limits:
try:
tmp = dt_float(limits[el])
except:
raise BackendError(
'Impossible to convert data ({}) for powerline named "{}" into float '
"values".format(limits[el], el)
)
if tmp <= 0:
raise BackendError(
'New thermal limit for powerlines "{}" is not positive ({})'
"".format(el, tmp)
)
self.thermal_limit_a[i] = tmp
def update_thermal_limit_from_vect(self, thermal_limit_a):
"""You can use it if your backend stores the thermal limits
of the grid in a vector (see :class:`PandaPowerBackend` for example)
.. warning::
This is not called by the environment and cannot be used to
model Dynamic Line Rating. For such purpose please use `update_thermal_limit`
This function is used to create a "Simulator" from a backend for example.
Parameters
----------
vect : np.ndarray
The thermal limits (in A)
"""
thermal_limit_a = np.array(thermal_limit_a).astype(dt_float)
self.thermal_limit_a[:] = thermal_limit_a
def update_thermal_limit(self, env):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This is done in a call to `env.step` in case of DLR for example.
If you don't want this feature, do not implement it.
Update the new thermal limit in case of DLR for example.
By default it does nothing.
Depending on the operational strategy, it is also possible to implement some
`Dynamic Line Rating <https://en.wikipedia.org/wiki/Dynamic_line_rating_for_electric_utilities>`_ (DLR)
strategies.
In this case, this function will give the thermal limit for a given time step provided the flows and the
weather condition are accessible by the backend. Our methodology doesn't make any assumption on the method
used to get these thermal limits.
Parameters
----------
env: :class:`grid2op.Environment.Environment`
The environment used to compute the thermal limit
"""
pass
def get_thermal_limit(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Retrieve the thermal limit directly from the environment instead (with a call
to :func:`grid2op.Environment.BaseEnc.get_thermal_limit` for example)
Gives the thermal limit (in amps) for each powerline of the _grid. Only one value per powerline is returned.
It is assumed that both :func:`Backend.get_line_flow` and *_get_thermal_limit* gives the value of the same
end of the powerline.
See the help of *_get_line_flow* for a more detailed description of this problem.
For assumption about the order of the powerline flows return in this vector, see the help of the
:func:`Backend.get_line_status` method.
:return: An array giving the thermal limit of the powerlines.
:rtype: np.array, dtype:float
"""
return self.thermal_limit_a
def get_relative_flow(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Prefer using :attr:`grid2op.Observation.BaseObservation.rho`
This method return the relative flows, *eg.* the current flow divided by the thermal limits. It has a pretty
straightforward default implementation, but it can be overriden for example for transformer if the limits are
on the lower voltage side or on the upper voltage level.
Returns
-------
res: ``numpy.ndarray``, dtype: float
The relative flow in each powerlines of the grid.
"""
num_ = self.get_line_flow()
denom_ = self.get_thermal_limit()
res = np.divide(num_, denom_)
return res
def get_line_overflow(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Prefer using :attr:`grid2op.Observation.BaseObservation.rho` and
check whether or not the flow is higher tha 1. or have a look at
:attr:`grid2op.Observation.BaseObservation.timestep_overflow` and check the
non zero index.
Prefer using the attribute of the :class:`grid2op.Observation.BaseObservation`
faster accessor to the line that are on overflow.
For assumption about the order of the powerline flows return in this vector, see the help of the
:func:`Backend.get_line_status` method.
:return: An array saying if a powerline is overflow or not
:rtype: np.array, dtype:bool
"""
th_lim = self.get_thermal_limit()
flow = self.get_line_flow()
return flow > th_lim
def shunt_info(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This method is optional. If implemented, it should return the proper information about the shunt in the
powergrid.
If not implemented it returns empty list.
Note that if there are shunt on the powergrid, it is recommended that this method should be implemented before
calling :func:`Backend.check_kirchoff`.
If this method is implemented AND :func:`Backend.check_kirchoff` is called, the method
:func:`Backend.sub_from_bus_id` should also be implemented preferably.
Returns
-------
shunt_p: ``numpy.ndarray``
For each shunt, the active power it withdraw at the bus to which it is connected.
shunt_q: ``numpy.ndarray``
For each shunt, the reactive power it withdraw at the bus to which it is connected.
shunt_v: ``numpy.ndarray``
For each shunt, the voltage magnitude of the bus to which it is connected.
shunt_bus: ``numpy.ndarray``
For each shunt, the bus id to which it is connected.
"""
return [], [], [], []
def get_theta(self):
"""
Notes
-----
Don't forget to set the flag :attr:`Backend.can_output_theta` to ``True`` in the
:func:`Bakcend.load_grid` if you support this feature.
Returns
-------
line_or_theta: ``numpy.ndarray``
For each origin side of powerline, gives the voltage angle
line_ex_theta: ``numpy.ndarray``
For each extremity side of powerline, gives the voltage angle
load_theta: ``numpy.ndarray``
Gives the voltage angle to the bus at which each load is connected
gen_theta: ``numpy.ndarray``
Gives the voltage angle to the bus at which each generator is connected
storage_theta: ``numpy.ndarray``
Gives the voltage angle to the bus at which each storage unit is connected
"""
raise NotImplementedError(
"Your backend does not support the retrieval of the voltage angle theta."
)
def sub_from_bus_id(self, bus_id):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Optional method that allows to get the substation if the bus id is provided.
Parameters
----------
bus_id: ``int``
The id of the bus where you want to know to which substation it belongs
Returns
-------
The substation to which an object connected to bus with id `bus_id` is connected to.
"""
raise BackendError(
"This backend doesn't allow to get the substation from the bus id."
)
def _disconnect_line(self, id_):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Prefer using the action space to disconnect a powerline.
Disconnect the line of id "id\\_ " in the backend.
In this scenario, the *id\\_* of a powerline is its position (counted starting from O) in the vector returned by
:func:`Backend.get_line_status` or :func:`Backend.get_line_flow` for example.
For example, if the current flow on powerline "l1" is the 42nd element of the vector returned by
:func:`Backend.get_line_flow`
then :func:`Backend._disconnect_line(42)` will disconnect this same powerline "l1".
For assumption about the order of the powerline flows return in this vector, see the help of the
:func:`Backend.get_line_status` method.
:param id_: id of the powerline to be disconnected
:type id_: int
"""
my_cls = type(self)
action = my_cls._complete_action_class()
action.update({"set_line_status": [(id_, -1)]})
bk_act = my_cls.my_bk_act_class()
bk_act += action
self.apply_action(bk_act)
def _runpf_with_diverging_exception(self, is_dc):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Computes a power flow on the _grid and raises an exception in case of diverging power flow, or any other
exception that can be thrown by the backend.
:param is_dc: mode of the power flow. If *is_dc* is True, then the powerlow is run using the DC
approximation otherwise it uses the AC powerflow.
:type is_dc: bool
Raises
------
exc_: :class:`grid2op.Exceptions.DivergingPowerFlow`
In case of divergence of the powerflow
"""
conv = False
exc_me = None
try:
conv, exc_me = self.runpf(is_dc=is_dc) # run powerflow
except Grid2OpException as exc_:
exc_me = exc_
except Exception as exc_:
exc_me = DivergingPowerFlow(
f" An unexpected error occurred during the computation of the powerflow."
f"The error is: \n {exc_} \n. This is game over"
)
if not conv and exc_me is None:
exc_me = DivergingPowerFlow(
"GAME OVER: Powerflow has diverged during computation "
"or a load has been disconnected or a generator has been disconnected."
)
return exc_me
    def next_grid_state(self, env, is_dc=False):
        """
        INTERNAL
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        This is called by `env.step`
        This method is called by the environment to compute the next\\_grid\\_states.
        It allows to compute the powerline and approximate the "cascading failures" if there are some overflows.
        Attributes
        ----------
        env: :class:`grid2op.Environment.Environment`
            the environment in which the powerflow is ran.
        is_dc: ``bool``
            mode of power flow (AC : False, DC: is_dc is True)
        Returns
        --------
        disconnected_during_cf: ``numpy.ndarray``, dtype=bool
            For each powerlines, it returns ``True`` if the powerline has been disconnected due to a cascading failure
            or ``False`` otherwise.
        infos: ``list``
            If :attr:`Backend.detailed_infos_for_cascading_failures` is ``True`` then it returns the different
            state computed by the powerflow (can drastically slow down this function, as it requires
            deep copy of backend object). Otherwise the list is always empty.
        """
        infos = []
        # -1 marks "never disconnected by the cascade"; entries are overwritten
        # with the iteration number `ts` at which the line got disconnected
        disconnected_during_cf = np.full(self.n_line, fill_value=-1, dtype=dt_int)
        conv_ = self._runpf_with_diverging_exception(is_dc)
        if env._no_overflow_disconnection or conv_ is not None:
            # cascading failure disabled, or the initial powerflow already failed
            return disconnected_during_cf, infos, conv_
        # the environment disconnect some powerlines
        # deep copy: the per-line overflow counters are incremented locally below
        # without mutating the environment's own counters
        init_time_step_overflow = copy.deepcopy(env._timestep_overflow)
        ts = 0
        while True:
            # simulate the cascading failure
            lines_flows = 1.0 * self.get_line_flow()
            thermal_limits = self.get_thermal_limit() * env._parameters.SOFT_OVERFLOW_THRESHOLD # SOFT_OVERFLOW_THRESHOLD new in grid2op 1.9.3
            lines_status = self.get_line_status()
            # a) disconnect lines on hard overflow (that are still connected)
            to_disc = (
                lines_flows > env._hard_overflow_threshold * thermal_limits
            ) & lines_status
            # b) deals with soft overflow (disconnect them if lines still connected)
            init_time_step_overflow[(lines_flows >= thermal_limits) & lines_status] += 1
            to_disc[
                (init_time_step_overflow > env._nb_timestep_overflow_allowed)
                & lines_status
            ] = True
            # disconnect the current power lines
            if to_disc[lines_status].sum() == 0:
                # no powerlines have been disconnected at this time step, i stop the computation there
                break
            disconnected_during_cf[to_disc] = ts
            # perform the disconnection action
            for i, el in enumerate(to_disc):
                if el:
                    self._disconnect_line(i)
            # start a powerflow on this new state
            conv_ = self._runpf_with_diverging_exception(is_dc)
            if self.detailed_infos_for_cascading_failures:
                infos.append(self.copy())
            if conv_ is not None:
                # powerflow diverged after the disconnections: stop the cascade
                break
            ts += 1
        return disconnected_during_cf, infos, conv_
def storages_info(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Prefer using :attr:`grid2op.Observation.BaseObservation.storage_power` instead.
This method is used to retrieve information about the storage units (active, reactive consumption
and voltage magnitude of the bus to which it is connected).
Returns
-------
storage_p ``numpy.ndarray``
The active power consumption for each load (in MW)
storage_q ``numpy.ndarray``
The reactive power consumption for each load (in MVAr)
storage_v ``numpy.ndarray``
The voltage magnitude of the bus to which each load is connected (in kV)
"""
if self.n_storage > 0:
raise BackendError(
"storages_info method is not implemented yet there is batteries on the grid."
)
def storage_deact_for_backward_comaptibility(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This function is called under a very specific condition: an old environment has been loaded that
do not take into account the storage units, even though they were possibly some modeled by the backend.
This function is supposed to "remove" from the backend any reference to the storage units.
Overloading this function is not necessary (when developing a new backend). If it is not overloaded however,
some "backward compatibility" (for grid2op <= 1.4.0) might not be working properly depending on
your backend.
"""
pass
    def check_kirchoff(self):
        """
        INTERNAL
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Check that the powergrid respects kirchhoff's law.
        This function can be called at any moment (after a powerflow has been run)
        to make sure a powergrid is in a consistent state, or to perform
        some tests for example.
        In order to function properly, this method requires that :func:`Backend.shunt_info` and
        :func:`Backend.sub_from_bus_id` are properly defined. Otherwise the results might be wrong, especially
        for reactive values (q_subs and q_bus bellow)
        Returns
        -------
        p_subs ``numpy.ndarray``
            sum of injected active power at each substations (MW)
        q_subs ``numpy.ndarray``
            sum of injected reactive power at each substations (MVAr)
        p_bus ``numpy.ndarray``
            sum of injected active power at each buses. It is given in form of a matrix, with number of substations as
            row, and number of columns equal to the maximum number of buses for a substation (MW)
        q_bus ``numpy.ndarray``
            sum of injected reactive power at each buses. It is given in form of a matrix, with number of substations as
            row, and number of columns equal to the maximum number of buses for a substation (MVAr)
        diff_v_bus: ``numpy.ndarray`` (2d array)
            difference between maximum voltage and minimum voltage (computed for each elements)
            at each bus. It is an array of two dimension:
            - first dimension represents the the substation (between 1 and self.n_sub)
            - second element represents the busbar in the substation (0 or 1 usually)
        """
        # gather per-element injections from the backend (only p, q, v are used)
        p_or, q_or, v_or, *_ = self.lines_or_info()
        p_ex, q_ex, v_ex, *_ = self.lines_ex_info()
        p_gen, q_gen, v_gen = self.generators_info()
        p_load, q_load, v_load = self.loads_info()
        if self.n_storage > 0:
            p_storage, q_storage, v_storage = self.storages_info()
        # fist check the "substation law" : nothing is created at any substation
        p_subs = np.zeros(self.n_sub, dtype=dt_float)
        q_subs = np.zeros(self.n_sub, dtype=dt_float)
        # check for each bus
        p_bus = np.zeros((self.n_sub, 2), dtype=dt_float)
        q_bus = np.zeros((self.n_sub, 2), dtype=dt_float)
        # -1.0 is the "not seen yet" sentinel for the min/max voltage slots
        v_bus = (
            np.zeros((self.n_sub, 2, 2), dtype=dt_float) - 1.0
        )  # sub, busbar, [min,max]
        topo_vect = self.get_topo_vect()
        # bellow i'm "forced" to do a loop otherwise, numpy do not compute the "+=" the way I want it to.
        # for example, if two powerlines are such that line_or_to_subid is equal (eg both connected to substation 0)
        # then numpy do not guarantee that `p_subs[self.line_or_to_subid] += p_or` will add the two "corresponding p_or"
        # TODO this can be vectorized with matrix product, see example in obs.flow_bus_matrix (BaseObervation.py)
        for i in range(self.n_line):
            sub_or_id = self.line_or_to_subid[i]
            sub_ex_id = self.line_ex_to_subid[i]
            # topo_vect is 1-based (bus 1 or 2); shift to 0-based local bus index
            loc_bus_or = topo_vect[self.line_or_pos_topo_vect[i]] - 1
            loc_bus_ex = topo_vect[self.line_ex_pos_topo_vect[i]] - 1
            # for substations
            p_subs[sub_or_id] += p_or[i]
            p_subs[sub_ex_id] += p_ex[i]
            q_subs[sub_or_id] += q_or[i]
            q_subs[sub_ex_id] += q_ex[i]
            # for bus
            p_bus[sub_or_id, loc_bus_or] += p_or[i]
            q_bus[sub_or_id, loc_bus_or] += q_or[i]
            p_bus[ sub_ex_id, loc_bus_ex] += p_ex[i]
            q_bus[sub_ex_id, loc_bus_ex] += q_ex[i]
            # fill the min / max voltage per bus (initialization)
            # NOTE(review): the -1 sentinel is replaced by v_or[i]/v_ex[i] even
            # when the line appears disconnected (v == 0) — confirm this is intended
            if (v_bus[sub_or_id,loc_bus_or,][0] == -1):
                v_bus[sub_or_id,loc_bus_or,][0] = v_or[i]
            if (v_bus[sub_ex_id,loc_bus_ex,][0] == -1):
                v_bus[sub_ex_id,loc_bus_ex,][0] = v_ex[i]
            if (v_bus[sub_or_id, loc_bus_or,][1]== -1):
                v_bus[sub_or_id,loc_bus_or,][1] = v_or[i]
            if (v_bus[sub_ex_id,loc_bus_ex,][1]== -1):
                v_bus[sub_ex_id,loc_bus_ex,][1] = v_ex[i]
            # now compute the correct stuff
            if v_or[i] > 0.0:
                # line is connected
                v_bus[sub_or_id,loc_bus_or,][0] = min(v_bus[sub_or_id,loc_bus_or,][0],v_or[i],)
                v_bus[sub_or_id,loc_bus_or,][1] = max(v_bus[sub_or_id,loc_bus_or,][1],v_or[i],)
            if v_ex[i] > 0:
                # line is connected
                v_bus[sub_ex_id,loc_bus_ex,][0] = min(v_bus[sub_ex_id,loc_bus_ex,][0],v_ex[i],)
                v_bus[sub_ex_id,loc_bus_ex,][1] = max(v_bus[sub_ex_id,loc_bus_ex,][1],v_ex[i],)
        # generators: production is subtracted (injections counted load-convention)
        for i in range(self.n_gen):
            # for substations
            p_subs[self.gen_to_subid[i]] -= p_gen[i]
            q_subs[self.gen_to_subid[i]] -= q_gen[i]
            # for bus
            p_bus[
                self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1
            ] -= p_gen[i]
            q_bus[
                self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1
            ] -= q_gen[i]
            # compute max and min values
            if v_gen[i]:
                # but only if gen is connected
                v_bus[self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1][
                    0
                ] = min(
                    v_bus[
                        self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1
                    ][0],
                    v_gen[i],
                )
                v_bus[self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1][
                    1
                ] = max(
                    v_bus[
                        self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1
                    ][1],
                    v_gen[i],
                )
        # loads: consumption is added
        for i in range(self.n_load):
            # for substations
            p_subs[self.load_to_subid[i]] += p_load[i]
            q_subs[self.load_to_subid[i]] += q_load[i]
            # for buses
            p_bus[
                self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1
            ] += p_load[i]
            q_bus[
                self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1
            ] += q_load[i]
            # compute max and min values
            if v_load[i]:
                # but only if load is connected
                v_bus[self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1][
                    0
                ] = min(
                    v_bus[
                        self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1
                    ][0],
                    v_load[i],
                )
                v_bus[self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1][
                    1
                ] = max(
                    v_bus[
                        self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1
                    ][1],
                    v_load[i],
                )
        # storage units: counted like loads (p_storage, set above when n_storage > 0)
        for i in range(self.n_storage):
            p_subs[self.storage_to_subid[i]] += p_storage[i]
            q_subs[self.storage_to_subid[i]] += q_storage[i]
            p_bus[
                self.storage_to_subid[i], topo_vect[self.storage_pos_topo_vect[i]] - 1
            ] += p_storage[i]
            q_bus[
                self.storage_to_subid[i], topo_vect[self.storage_pos_topo_vect[i]] - 1
            ] += q_storage[i]
            # compute max and min values
            if v_storage[i] > 0:
                # the storage unit is connected
                v_bus[
                    self.storage_to_subid[i],
                    topo_vect[self.storage_pos_topo_vect[i]] - 1,
                ][0] = min(
                    v_bus[
                        self.storage_to_subid[i],
                        topo_vect[self.storage_pos_topo_vect[i]] - 1,
                    ][0],
                    v_storage[i],
                )
                v_bus[
                    self.storage_to_subid[i],
                    topo_vect[self.storage_pos_topo_vect[i]] - 1,
                ][1] = max(
                    v_bus[
                        self.storage_to_subid[i],
                        topo_vect[self.storage_pos_topo_vect[i]] - 1,
                    ][1],
                    v_storage[i],
                )
        if self.shunts_data_available:
            p_s, q_s, v_s, bus_s = self.shunt_info()
            for i in range(self.n_shunt):
                # for substations
                p_subs[self.shunt_to_subid[i]] += p_s[i]
                q_subs[self.shunt_to_subid[i]] += q_s[i]
                # for buses
                p_bus[self.shunt_to_subid[i], bus_s[i] - 1] += p_s[i]
                q_bus[self.shunt_to_subid[i], bus_s[i] - 1] += q_s[i]
                # compute max and min values
                v_bus[self.shunt_to_subid[i], bus_s[i] - 1][0] = min(
                    v_bus[self.shunt_to_subid[i], bus_s[i] - 1][0], v_s[i]
                )
                v_bus[self.shunt_to_subid[i], bus_s[i] - 1][1] = max(
                    v_bus[self.shunt_to_subid[i], bus_s[i] - 1][1], v_s[i]
                )
        else:
            warnings.warn(
                "Backend.check_kirchoff Impossible to get shunt information. Reactive information might be "
                "incorrect."
            )
        # per-bus voltage spread: max - min observed among connected elements
        diff_v_bus = np.zeros((self.n_sub, 2), dtype=dt_float)
        diff_v_bus[:, :] = v_bus[:, :, 1] - v_bus[:, :, 0]
        return p_subs, q_subs, p_bus, q_bus, diff_v_bus
def load_redispacthing_data(self, path, name="prods_charac.csv"):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This method will load everything needed for the redispatching and unit commitment problem.
We don't recommend at all to modify this function.
Parameters
----------
path: ``str``
Location of the dataframe containing the redispatching data. This dataframe (csv, coma separated)
should have at least the columns (other columns are ignored, order of the colums do not matter):
- "name": identifying the name of the generator (should match the names in self.name_gen)
- "type": one of "thermal", "nuclear", "wind", "solar" or "hydro" representing the type of the generator
- "pmax": the maximum value the generator can produce (in MW)
- "pmin": the minimum value the generator can produce (in MW)
- "max_ramp_up": maximum value the generator can increase its production between two consecutive
steps TODO make it independant from the duration of the step
- "max_ramp_down": maximum value the generator can decrease its production between two consecutive
steps (is positive) TODO make it independant from the duration of the step
- "start_cost": starting cost of the generator in $ (or any currency you want)
- "shut_down_cost": cost associated to the shut down of the generator in $ (or any currency you want)
- "marginal_cost": "average" marginal cost of the generator. For now we don't allow it to vary across
different steps or episode in $/(MW.time step duration) and NOT $/MWh (TODO change that)
- "min_up_time": minimum time a generator need to stay "connected" before we can disconnect it (
measured in time step) (TODO change that)
- "min_down_time": minimum time a generator need to stay "disconnected" before we can connect it again.(
measured in time step) (TODO change that)
name: ``str``
Name of the dataframe containing the redispatching data. Defaults to 'prods_charac.csv', we don't advise
to change it.
"""
self._fill_names()
self.redispatching_unit_commitment_availble = False
# for redispatching
fullpath = os.path.join(path, name)
if not os.path.exists(fullpath):
return
try:
df = pd.read_csv(fullpath, sep=",")
except Exception as exc_:
warnings.warn(
f'Impossible to load the redispatching data for this environment with error:\n"{exc_}"\n'
f"Redispatching will be unavailable.\n"
f"Please make sure \"{name}\" file is a csv (coma ',') separated file."
)
return
mandatory_columns = [
"type",
"Pmax",
"Pmin",
"max_ramp_up",
"max_ramp_down",
"start_cost",
"shut_down_cost",
"marginal_cost",
"min_up_time",
"min_down_time",
]
for el in mandatory_columns:
if el not in df.columns:
warnings.warn(
f"Impossible to load the redispatching data for this environment because"
f"one of the mandatory column is not present ({el}). Please check the file "
f'"{name}" contains all the mandatory columns: {mandatory_columns}'
)
return
gen_info = {}
for _, row in df.iterrows():
gen_info[row["name"]] = {
"type": row["type"],
"pmax": row["Pmax"],
"pmin": row["Pmin"],
"max_ramp_up": row["max_ramp_up"],
"max_ramp_down": row["max_ramp_down"],
"start_cost": row["start_cost"],
"shut_down_cost": row["shut_down_cost"],
"marginal_cost": row["marginal_cost"],
"min_up_time": row["min_up_time"],
"min_down_time": row["min_down_time"],
}
self.redispatching_unit_commitment_availble = True
self.gen_type = np.full(self.n_gen, fill_value="aaaaaaaaaa")
self.gen_pmin = np.full(self.n_gen, fill_value=1.0, dtype=dt_float)
self.gen_pmax = np.full(self.n_gen, fill_value=1.0, dtype=dt_float)
self.gen_redispatchable = np.full(self.n_gen, fill_value=False, dtype=dt_bool)
self.gen_max_ramp_up = np.full(self.n_gen, fill_value=0.0, dtype=dt_float)
self.gen_max_ramp_down = np.full(self.n_gen, fill_value=0.0, dtype=dt_float)
self.gen_min_uptime = np.full(self.n_gen, fill_value=-1, dtype=dt_int)
self.gen_min_downtime = np.full(self.n_gen, fill_value=-1, dtype=dt_int)
self.gen_cost_per_MW = np.full(
self.n_gen, fill_value=1.0, dtype=dt_float
) # marginal cost
self.gen_startup_cost = np.full(
self.n_gen, fill_value=1.0, dtype=dt_float
) # start cost
self.gen_shutdown_cost = np.full(
self.n_gen, fill_value=1.0, dtype=dt_float
) # shutdown cost
self.gen_renewable = np.full(self.n_gen, fill_value=False, dtype=dt_bool)
for i, gen_nm in enumerate(self.name_gen):
try:
tmp_gen = gen_info[gen_nm]
except KeyError as exc_:
raise BackendError(
f"Impossible to load the redispatching data. The generator {i} with name {gen_nm} "
f'could not be located on the description file "{name}".'
)
self.gen_type[i] = str(tmp_gen["type"])
self.gen_pmin[i] = self._aux_check_finite_float(
tmp_gen["pmin"], f' for gen. "{gen_nm}" and column "pmin"'
)
self.gen_pmax[i] = self._aux_check_finite_float(
tmp_gen["pmax"], f' for gen. "{gen_nm}" and column "pmax"'
)
self.gen_redispatchable[i] = dt_bool(
tmp_gen["type"] not in ["wind", "solar"]
)
tmp = dt_float(tmp_gen["max_ramp_up"])
if np.isfinite(tmp):
self.gen_max_ramp_up[i] = tmp
tmp = dt_float(tmp_gen["max_ramp_down"])
if np.isfinite(tmp):
self.gen_max_ramp_down[i] = tmp
self.gen_min_uptime[i] = dt_int(tmp_gen["min_up_time"])
self.gen_min_downtime[i] = dt_int(tmp_gen["min_down_time"])
self.gen_cost_per_MW[i] = dt_float(tmp_gen["marginal_cost"])
self.gen_startup_cost[i] = dt_float(tmp_gen["start_cost"])
self.gen_shutdown_cost[i] = dt_float(tmp_gen["shut_down_cost"])
self.gen_renewable[i] = dt_bool(tmp_gen["type"] in ["wind", "solar"])
self.redispatching_unit_commitment_availble = True
    def load_storage_data(self, path, name="storage_units_charac.csv"):
        """
        INTERNAL
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        This method will load everything needed in presence of storage unit on the grid.
        We don't recommend at all to modify this function.
        Parameters
        ----------
        path: ``str``
            Location of the dataframe containing the storage unit data. This dataframe (csv, coma separated)
            should have at least the columns. It is mandatory to have it if there are storage units on the grid,
            but it is ignored if not:
            - "name": identifying the name of the unit storage (should match the names in self.name_storage)
            - "type": one of "battery", "pumped_storage" representing the type of the unit storage
            - "Emax": the maximum energy capacity the unit can store (in MWh)
            - "Emin": the minimum energy capacity the unit can store (in MWh) [it can be >0 if a battery cannot be
              completely empty for example]
            - "max_p_prod": maximum flow the battery can absorb in MW
            - "max_p_absorb": maximum flow the battery can produce in MW
            - "marginal_cost": cost in $ (or any currency, really) of usage of the battery.
            - "power_discharge_loss" (optional): power loss in the battery in MW (the capacity will decrease constantly
              of this amount). Set it to 0.0 to deactivate it. If not present, it is set to 0.
            - "charging_efficiency" (optional):
              Float between 0. and 1. 1. means that if the grid provides 1MW (for ex. 1MW for 1h) to the storage
              capacity, then the
              state of charge of the battery will increase of 1MWh. If this efficiency is 0.5 then if 1MWh
              if provided by the grid, then only 0.5MWh will be stored.
            - "discharging_efficiency" (optional): battery efficiency when it is discharged. 1.0 means if you want to
              get 1MWh on the grid, the battery state of charge will decrease by 1MWh. If this is 33% then it
              means if you want to get (grid point of view) 1MWh on the grid, you need to decrease the
              state of charge of 3MWh.
        name: ``str``
            Name of the dataframe containing the redispatching data. Defaults to 'prods_charac.csv', we don't advise
            to change it.
        Notes
        -----
        The battery efficiency defined as the "AC-AC" round trip efficiency is, with the convention above, defined
        as `charging_efficiency * discharging_efficency` (see
        https://www.greeningthegrid.org/news/new-resource-grid-scale-battery-storage-frequently-asked-questions-1
        for further references)
        """
        if self.n_storage == 0:
            # set the "no battery state" if there are none
            type(self).set_no_storage()
            return
        # for storage unit information
        fullpath = os.path.join(path, name)
        if not os.path.exists(fullpath):
            # unlike redispatching data, the storage description is mandatory
            raise BackendError(
                f"There are storage unit on the grid, yet we could not locate their description."
                f'Please make sure to have a file "{name}" where the environment data are located.'
                f'For this environment the location is "{path}"'
            )
        try:
            df = pd.read_csv(fullpath)
        except Exception as exc_:
            raise BackendError(
                f"There are storage unit on the grid, yet we could not locate their description."
                f'Please make sure to have a file "{name}" where the environment data are located.'
                f'For this environment the location is "{path}"'
            )
        mandatory_colnames = [
            "name",
            "type",
            "Emax",
            "Emin",
            "max_p_prod",
            "max_p_absorb",
            "marginal_cost",
        ]
        for el in mandatory_colnames:
            if el not in df.columns:
                raise BackendError(
                    f"There are storage unit on the grid, yet we could not properly load their "
                    f"description. Please make sure the csv {name} contains all the columns "
                    f"{mandatory_colnames}"
                )
        # index the csv rows by storage-unit name, filling optional columns
        # with their documented defaults when absent
        stor_info = {}
        for _, row in df.iterrows():
            stor_info[row["name"]] = {
                "name": row["name"],
                "type": row["type"],
                "Emax": row["Emax"],
                "Emin": row["Emin"],
                "max_p_prod": row["max_p_prod"],
                "max_p_absorb": row["max_p_absorb"],
                "marginal_cost": row["marginal_cost"],
            }
            # NOTE(review): the docstring documents this optional column as
            # "power_discharge_loss" but the code reads "power_loss" — confirm
            # which name the data files actually use
            if "power_loss" in row:
                stor_info[row["name"]]["power_loss"] = row["power_loss"]
            else:
                stor_info[row["name"]]["power_loss"] = 0.0
            if "charging_efficiency" in row:
                stor_info[row["name"]]["charging_efficiency"] = row[
                    "charging_efficiency"
                ]
            else:
                stor_info[row["name"]]["charging_efficiency"] = 1.0
            if "discharging_efficiency" in row:
                stor_info[row["name"]]["discharging_efficiency"] = row[
                    "discharging_efficiency"
                ]
            else:
                stor_info[row["name"]]["discharging_efficiency"] = 1.0
        # allocate the per-storage-unit arrays with placeholder values
        self.storage_type = np.full(self.n_storage, fill_value="aaaaaaaaaa")
        self.storage_Emax = np.full(self.n_storage, fill_value=1.0, dtype=dt_float)
        self.storage_Emin = np.full(self.n_storage, fill_value=0.0, dtype=dt_float)
        self.storage_max_p_prod = np.full(
            self.n_storage, fill_value=1.0, dtype=dt_float
        )
        self.storage_max_p_absorb = np.full(
            self.n_storage, fill_value=1.0, dtype=dt_float
        )
        self.storage_marginal_cost = np.full(
            self.n_storage, fill_value=1.0, dtype=dt_float
        )
        self.storage_loss = np.full(self.n_storage, fill_value=0.0, dtype=dt_float)
        self.storage_charging_efficiency = np.full(
            self.n_storage, fill_value=1.0, dtype=dt_float
        )
        self.storage_discharging_efficiency = np.full(
            self.n_storage, fill_value=1.0, dtype=dt_float
        )
        # fill the arrays, validating that every value is a finite float
        for i, sto_nm in enumerate(self.name_storage):
            try:
                tmp_sto = stor_info[sto_nm]
            except KeyError as exc_:
                raise BackendError(
                    f"Impossible to load the storage data. The storage unit {i} with name {sto_nm} "
                    f'could not be located on the description file "{name}" with error : \n'
                    f"{exc_}."
                )
            self.storage_type[i] = str(tmp_sto["type"])
            self.storage_Emax[i] = self._aux_check_finite_float(
                tmp_sto["Emax"], f' for {sto_nm} and column "Emax"'
            )
            self.storage_Emin[i] = self._aux_check_finite_float(
                tmp_sto["Emin"], f' for {sto_nm} and column "Emin"'
            )
            self.storage_max_p_prod[i] = self._aux_check_finite_float(
                tmp_sto["max_p_prod"], f' for {sto_nm} and column "max_p_prod"'
            )
            self.storage_max_p_absorb[i] = self._aux_check_finite_float(
                tmp_sto["max_p_absorb"], f' for {sto_nm} and column "max_p_absorb"'
            )
            self.storage_marginal_cost[i] = self._aux_check_finite_float(
                tmp_sto["marginal_cost"], f' for {sto_nm} and column "marginal_cost"'
            )
            self.storage_loss[i] = self._aux_check_finite_float(
                tmp_sto["power_loss"], f' for {sto_nm} and column "power_loss"'
            )
            self.storage_charging_efficiency[i] = self._aux_check_finite_float(
                tmp_sto["charging_efficiency"],
                f' for {sto_nm} and column "charging_efficiency"',
            )
            self.storage_discharging_efficiency[i] = self._aux_check_finite_float(
                tmp_sto["discharging_efficiency"],
                f' for {sto_nm} and column "discharging_efficiency"',
            )
def _aux_check_finite_float(self, nb_, str_=""):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
check and returns if correct that a number is convertible to `dt_float` and that it's finite
"""
tmp = dt_float(nb_)
if not np.isfinite(tmp):
raise BackendError(
f"Infinite number met for a number that should be finite. Please check your data {str_}"
)
return tmp
def load_grid_layout(self, path, name="grid_layout.json"):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
We don't recommend at all to modify this function.
This function loads the layout (eg the coordinates of each substation) for the powergrid.
Parameters
----------
path: ``str``
TODO
name: ``str``
TODO
"""
full_fn = os.path.join(path, name)
if not os.path.exists(full_fn):
return Exception("File {} does not exist".format(full_fn))
try:
with open(full_fn, "r") as f:
dict_ = json.load(f)
except Exception as e:
return e
new_grid_layout = {}
for el in self.name_sub:
if el not in dict_:
return Exception("substation named {} not in layout".format(el))
tmp = dict_[el]
try:
x, y = tmp
x = dt_float(x)
y = dt_float(y)
new_grid_layout[el] = (x, y)
except Exception as e_:
return Exception(
"fail to convert coordinates for {} into list of coordinates with error {}"
"".format(el, e_)
)
self.attach_layout(grid_layout=new_grid_layout)
return None
def _aux_get_line_status_to_set(self, line_status):
line_status = 2 * line_status - 1
line_status = line_status.astype(dt_int)
return line_status
def get_action_to_set(self):
"""
Get the action to set another backend to represent the internal state of this current backend.
It handles also the information about the shunts if available
Returns
-------
res: :class:`grid2op.Action.CompleteAction`
The complete action to set a backend to the internal state of `self`
"""
line_status = self._aux_get_line_status_to_set(self.get_line_status())
topo_vect = self.get_topo_vect()
if np.all(topo_vect == -1):
raise RuntimeError(
"The get_action_to_set should not be used after a divergence of the powerflow"
)
prod_p, _, prod_v = self.generators_info()
load_p, load_q, _ = self.loads_info()
set_me = self._complete_action_class()
dict_ = {
"set_line_status": line_status,
"set_bus": 1 * topo_vect,
"injection": {
"prod_p": prod_p,
"prod_v": prod_v,
"load_p": load_p,
"load_q": load_q,
},
}
if self.shunts_data_available:
p_s, q_s, sh_v, bus_s = self.shunt_info()
dict_["shunt"] = {"shunt_bus": bus_s}
if (bus_s >= 1).sum():
p_s *= (self._sh_vnkv / sh_v) ** 2
q_s *= (self._sh_vnkv / sh_v) ** 2
p_s[bus_s == -1] = np.NaN
q_s[bus_s == -1] = np.NaN
dict_["shunt"]["shunt_p"] = p_s
dict_["shunt"]["shunt_q"] = q_s
if self.n_storage > 0:
sto_p, *_ = self.storages_info()
dict_["set_storage"] = 1.0 * sto_p
set_me.update(dict_)
return set_me
def update_from_obs(self, obs, force_update=False):
"""
Takes an observation as input and update the internal state of `self` to match the state of the backend
that produced this observation.
Only the "line_status", "topo_vect", "prod_p", "prod_v", "load_p" and "load_q" attributes of the
observations are used.
Notes
-----
If the observation is not perfect (for example with noise, or partial) this method will not work. You need
to pass it a complete observation.
For example, you might want to consider to have a state estimator if that is the case.
Parameters
----------
obs: :class:`grid2op.Observation.CompleteObservation`
A complete observation describing the state of the grid you want this backend to be in.
"""
# lazy loading to prevent circular references
from grid2op.Observation import CompleteObservation
if (not force_update) and (not isinstance(obs, CompleteObservation)):
raise BackendError(
"Impossible to set a backend to a state not represented by a "
'"grid2op.Observation.CompleteObservation".'
)
backend_action = self.my_bk_act_class()
act = self._complete_action_class()
line_status = self._aux_get_line_status_to_set(obs.line_status)
# skip the action part and update directly the backend action !
dict_ = {
"set_bus": obs.topo_vect,
"set_line_status": line_status,
"injection": {
"prod_p": obs.prod_p,
"prod_v": obs.prod_v,
"load_p": obs.load_p,
"load_q": obs.load_q,
},
}
if self.shunts_data_available and obs.shunts_data_available:
if "_shunt_bus" not in type(obs).attr_list_set:
raise BackendError(
"Impossible to set the backend to the state given by the observation: shunts data "
"are not present in the observation."
)
dict_["shunt"] = {"shunt_bus": obs._shunt_bus}
shunt_co = obs._shunt_bus >= 1
if shunt_co.any():
mults = (self._sh_vnkv / obs._shunt_v) ** 2
sh_p = obs._shunt_p * mults
sh_q = obs._shunt_q * mults
sh_p[~shunt_co] = np.NaN
sh_q[~shunt_co] = np.NaN
dict_["shunt"]["shunt_p"] = sh_p
dict_["shunt"]["shunt_q"] = sh_q
act.update(dict_)
backend_action += act
self.apply_action(backend_action)
    def assert_grid_correct(self):
        """
        INTERNAL
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        This is done as it should be by the Environment
        Initializes the grid-specific classes (backend action class, complete
        action class) for this backend's type and checks class-level consistency.
        """
        # lazy loading
        from grid2op.Action import CompleteAction
        from grid2op.Action._backendAction import _BackendAction
        orig_type = type(self)
        if orig_type.my_bk_act_class is None:
            # class is NOT yet initialized:
            # set up the proper class and everything
            self._init_class_attr()
            # hack due to changing class of imported module in the module itself
            self.__class__ = type(self).init_grid(
                type(self), force_module=type(self).__module__
            )
            # make the newly created class importable from its module
            setattr(
                sys.modules[type(self).__module__],
                self.__class__.__name__,
                self.__class__,
            )
        # reset the attribute of the grid2op.Backend.Backend class
        # that can be messed up with depending on the initialization of the backend
        Backend._clear_class_attribute()
        orig_type._clear_class_attribute()
        # rebuild the action classes specialized for this backend's grid
        my_cls = type(self)
        my_cls.my_bk_act_class = _BackendAction.init_grid(my_cls)
        my_cls._complete_action_class = CompleteAction.init_grid(my_cls)
        my_cls._complete_action_class._add_shunt_data()
        my_cls._complete_action_class._update_value_set()
        my_cls.assert_grid_correct_cls()
    def assert_grid_correct_after_powerflow(self):
        """
        INTERNAL
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        This is done as it should be by the Environment
        This method is called by the environment. It ensure that the backend remains consistent even after a powerflow
        has be run with :func:`Backend.runpf` method.
        :raise: :class:`grid2op.Exceptions.EnvError` and possibly all of its derived class.
        """
        # test the results gives the proper size
        tmp = self.get_line_status()
        if tmp.shape[0] != self.n_line:
            raise IncorrectNumberOfLines('returned by "backend.get_line_status()"')
        # NOTE(review): EnvironmentError below is the python builtin (an alias of
        # OSError); the grid2op EnvError was presumably intended -- confirm
        # before changing, callers may catch the builtin.
        if (~np.isfinite(tmp)).any():
            raise EnvironmentError(type(self).ERR_INIT_POWERFLOW)
        tmp = self.get_line_flow()
        if tmp.shape[0] != self.n_line:
            raise IncorrectNumberOfLines('returned by "backend.get_line_flow()"')
        if (~np.isfinite(tmp)).any():
            raise EnvironmentError(type(self).ERR_INIT_POWERFLOW)
        tmp = self.get_thermal_limit()
        if tmp.shape[0] != self.n_line:
            raise IncorrectNumberOfLines('returned by "backend.get_thermal_limit()"')
        if (~np.isfinite(tmp)).any():
            raise EnvironmentError(type(self).ERR_INIT_POWERFLOW)
        tmp = self.get_line_overflow()
        if tmp.shape[0] != self.n_line:
            raise IncorrectNumberOfLines('returned by "backend.get_line_overflow()"')
        if (~np.isfinite(tmp)).any():
            raise EnvironmentError(type(self).ERR_INIT_POWERFLOW)
        # every *_info() getter must return a tuple of the documented arity,
        # each element sized to the matching element count
        tmp = self.generators_info()
        if len(tmp) != 3:
            raise EnvError(
                '"generators_info()" should return a tuple with 3 elements: p, q and v'
            )
        for el in tmp:
            if el.shape[0] != self.n_gen:
                raise IncorrectNumberOfGenerators(
                    'returned by "backend.generators_info()"'
                )
        tmp = self.loads_info()
        if len(tmp) != 3:
            raise EnvError(
                '"loads_info()" should return a tuple with 3 elements: p, q and v'
            )
        for el in tmp:
            if el.shape[0] != self.n_load:
                raise IncorrectNumberOfLoads('returned by "backend.loads_info()"')
        tmp = self.lines_or_info()
        if len(tmp) != 4:
            raise EnvError(
                '"lines_or_info()" should return a tuple with 4 elements: p, q, v and a'
            )
        for el in tmp:
            if el.shape[0] != self.n_line:
                raise IncorrectNumberOfLines('returned by "backend.lines_or_info()"')
        tmp = self.lines_ex_info()
        if len(tmp) != 4:
            raise EnvError(
                '"lines_ex_info()" should return a tuple with 4 elements: p, q, v and a'
            )
        for el in tmp:
            if el.shape[0] != self.n_line:
                raise IncorrectNumberOfLines('returned by "backend.lines_ex_info()"')
        if self.n_storage > 0:
            tmp = self.storages_info()
            if len(tmp) != 3:
                raise EnvError(
                    '"storages_info()" should return a tuple with 3 elements: p, q and v'
                )
            # NOTE(review): IncorrectNumberOfLines is raised for storage units
            # as well -- presumably historical; confirm before changing the
            # exception type, callers may rely on it.
            for el in tmp:
                if el.shape[0] != self.n_storage:
                    raise IncorrectNumberOfLines(
                        'returned by "backend.storages_info()"'
                    )
        # the topology vector must cover every element of every substation
        tmp = self.get_topo_vect()
        if tmp.shape[0] != self.sub_info.sum():
            raise IncorrectNumberOfElements('returned by "backend.get_topo_vect()"')
        if (~np.isfinite(tmp)).any():
            raise EnvError(
                'Some components of "backend.get_topo_vect()" are not finite. This should be integer.'
            )
/CB_IPO-0.2.0.tar.gz/CB_IPO-0.2.0/docs/_build/doctrees/nbsphinx/Example Cases.ipynb | ### Importing
```
from CB_IPO import scrape
```
### Initializing
```
scraper = scrape()
```
### Scraping for recent IPO filings (S-1)
```
company_names, file_dates, form_types = scraper.edgar_scrape(5)
print(company_names)
print(file_dates)
print(form_types)
```
### Formatting into Dataframe
```
df = scraper.generate_df(5,1)
print(df)
```
### Modifying Search Dates
```
scraper.set_search_date("2021-01-01", "2023-03-31")
company_names, file_dates, form_types = scraper.edgar_scrape(5)
print (file_dates)
df = scraper.generate_df(5,1)
print(df)
```
### Modifying Form Types
```
scraper.reset_url()
scraper.add_forms(['10-K', '10-Q'])
company_names, file_dates, form_types = scraper.edgar_scrape(5)
print (form_types)
df = scraper.generate_df(5,1)
print(df)
```
### Generating Reference Codes and Accession Numbers
```
#This is the cik that identifies Pfizer
cik = '0000078003'
#This will yield all references and the name of a company tied to a cik
references, name = scraper.get_refs(cik,3)
#This will yield all accession numbers tied to a cik
accession_numbers = scraper.get_anums(cik,3)
print(references)
print(name)
```
### Finding xbrl Links for a Company
```
links,c_name = scraper.create_links(cik, 3)
for link in links:
print(link)
```
### Scraping Link for Financial Info
```
for link in links:
total_assets, total_liabilities, net_income = scraper.scrape_xbrl(link)
print('Assets: ',total_assets)
print('Liabilities: ',total_liabilities)
print('NI: ',net_income)
print()
```
| PypiClean |
/GOUDA-0.6.0.tar.gz/GOUDA-0.6.0/docs/index.md | # GOUDA
Good Old Utilities for Data Analysis!
## Note
> This is the main page of your project's [Sphinx] documentation. It is
> formatted in [Markdown]. Add additional pages by creating md-files in
> `docs` or rst-files (formated in [reStructuredText]) and adding links to
> them in the `Contents` section below.
>
> Please check [Sphinx], [recommonmark] and [autostructify] for more information
> about how to document your project and how to configure your preferences.
## Contents
* [Overview](readme)
* [License](license)
* [Authors](authors)
* [Changelog](changelog)
* [Module Reference](api/modules)
## Indices and tables
```eval_rst
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
```
[Sphinx]: http://www.sphinx-doc.org/
[Markdown]: https://daringfireball.net/projects/markdown/
[reStructuredText]: http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html
[recommonmark]: https://recommonmark.readthedocs.io/en/latest
[autostructify]: https://recommonmark.readthedocs.io/en/latest/auto_structify.html
| PypiClean |
/FreqDemod-0.2.1.tar.gz/FreqDemod-0.2.1/docs/background.rst | .. _Conventions:
Conventions and Background
==========================
**Fourier Transform**. Our frequency-demodulation algorithm implements the Hilbert Transform indirectly, *via* a Discrete Fourier Transform (DFT). We use the ``numpy.fft`` package [#numpy.fft]_ to carry out the DFT. This package defines the Fourier transform of the signal :math:`a_n` (having :math:`N` data points, :math:`n = 0, 1, .\ldots, N - 1`) as
.. math::
:label: eq:DFT
A_k = \sum_{n = 0}^{N - 1} a_n \: e^{-2 \pi \imath \, n k / N}
while the inverse Fourier transform is defined as
.. math::
:label: eq:DIFT
a_n = \frac{1}{N} \sum_{k = 0}^{N-1} A_k \: e^{\: 2 \pi \imath \, n k / N}.
In the derivations presented below, we will have need of the continuous Fourier transform. The continuous analog of the forward transform (equation :eq:`eq:DFT`) is
.. math::
:label: eq:FT
\hat{a}(f) = \int_{-\infty}^{+\infty} dt \:
a(t) \: e^{-2 \pi \imath f t }
while the continuous analog of the inverse transform (equation :eq:`eq:DIFT`) is
.. math::
:label: eq:IFT
a(t) = \int_{-\infty}^{+\infty} df \:
\hat{a}(f) \: e^{\: 2 \pi \imath f t }
We thus define our Fourier transform in terms of the frequency variable :math:`f \: \sim \: [\text{cycles/s} = \text{Hz}]` and not :math:`\omega = 2 \pi f \: \sim \: [\text{radians/s}]`. While this transform-variable convention agrees with the convention espoused by *Numerical Recipes* [#Press1986]_, the sign of the exponent in the ``numpy.fft`` DFT (:math:`-2 \pi \imath \, n k / N`) is different from the sign of the exponent in the *Numerical Recipes* DFT (:math:`+2 \pi \imath \, n k / N`).
In the following tutorials we define a correlation function and power spectrum based on the Fourier transform conventions of equations :eq:`eq:FT` and :eq:`eq:IFT`. The results of the tutorials can be summarized as follows.
**Cantilever Thermomechanical Fluctuations**. We characterize a microcantilever by its resonance frequency :math:`f_0 \: [\mathrm{Hz}]`, ringdown time :math:`\tau_0 \: [\mathrm{s}]`, and frictional coefficient :math:`\Gamma \: [\mathrm{N} \mathrm{s} \mathrm{m}^{-1}]`. The cantilever experiences a stochastic force arising from its interaction with the environment that gives rise to thermal fluctuations in cantilever position. In the first tutorial we show that, for a microcantilever in thermal equilibrium at temperature :math:`T`, the resulting power spectrum of these thermal fluctuations in cantilever position is given by
.. math::
:label: Eq:Pdzf
P_{\delta z}(f) = \frac{k_b T \tau_0^2}{\Gamma}
\frac{1}{(\pi \tau_0)^4 (f_0^2 - f^2)^2 + (\pi \tau_0)^2 f^2}
with :math:`k_b` Boltzmann's constant and :math:`T` the temperature. Assuming that the cantilever's temperature is known, we can fit the observed power spectrum of position fluctuations to equation :eq:`Eq:Pdzf` to obtain :math:`f_0`, :math:`\tau_0`, and :math:`\Gamma`. In terms of the quantities in equation :eq:`Eq:Pdzf`, the cantilever spring constant and quality factor are computed as :math:`k = 2 \pi^2 f_0^2 \tau_0 \Gamma \: [\mathrm{N} \: \mathrm{m}^{-1}]` and :math:`Q = \pi f_0 \tau_0 \: [\mathrm{unitless}]`, respectively.
**Cantilever Frequency Noise**. Both thermomechanical position fluctuations and detector noise contribute to the noise observed in the cantilever frequency determined using the algorithm described in the Introduction. In the second tutorial we show that these two noise sources give rise to apparent fluctuations in cantilever frequency whose power spectrum is given by
.. math::
:label: Eq:Pdff
P_{\delta f}(f) = \frac{1}{x_{\mathrm{rms}}^2}
\left(
\frac{1}{4 \pi^2} \frac{k_b T}{\Gamma} \frac{1}{(\pi \tau_0 f_0)^2}
+ f^2 P_{\delta x}^{\mathrm{det}}
\right)
with :math:`x_{\mathrm{rms}}` the root-mean-square amplitude of the driven cantilever, :math:`P_{\delta x}^{\mathrm{det}} \: [\mathrm{m}^2 \: \mathrm{Hz}^{-1}]` the power spectrum of detector noise written as an equivalent position fluctuation. In writing equation :eq:`Eq:Pdff`, we have assumed for simplicity that :math:`P_{\delta x}^{\mathrm{det}}(f)` is independent of frequency in the vicinity of the cantilever resonance at :math:`f = f_0`.
**References**
.. [#numpy.fft] *Discrete Fourier Transform* (``numpy.fft``). http://docs.scipy.org/doc/numpy/reference/routines.fft.html
.. [#Press1986] Press, W. H.; Flannery, B. P.; Teukolsky, S. A. & Vetterling, W. T. Numerical Recipes, The Art of Scientific Computing. Cambridge University Press, New York (1986). The current edition (3rd edition; 2007) is available online through http://www.nr.com/.
.. NOTES
.. =====
..
.. with 20080223-Marohn-Group_Report-Frequency_Noise_Tutorial-ver1
.. = fnt.tex
.. pandoc --output=fnt.rst --from=latex --to=rst fnt.tex
.. the conversion generated no errors
.. copy the contents of fnt.rst below and manually change === to --- etc
.. delete \color{Blue} everywhere
.. add the :label: Eq:xxx role everywhere we want numbered equation
.. can not have underscores in equation labels
.. refer to equations inline using :eq:`Eq:xxx`
.. with 20080223-Marohn-Group_Report-Frequency_Noise_Tutorial-ver1.tex
.. = hobm.tex
.. pandoc --output=hobm.rst --from=latex --to=rst hobm.tex
.. the conversion generated no errors
.. then hand-edit as indicated above
.. copy the contents of hobm.rst below and hand edit as follows
.. replace all the unit macros: \sec with {\mathrm{s}} and etc
.. add back in the section headings manually
.. add reference labels for the sections manually
.. edit out the macros involving \ensuremath
.. remove \tiny and \small
.. remove \lefteqn
.. remove as many as possible \begin{aligned} since we have a wider page here
.. grep search for \[eq:(\w+)\]
.. and replace with :eq:`eq:\1`
.. grep search eq. and replace with equation
.. upper document uses equation lables eq:xxx, the lower document Eq:xxx
.. look for :eq:`Eq and add the work equation before each reference
.. \begin{align} does not work well, but \begin{split} does. | PypiClean |
/OBP_reliability_pillar_3-0.0.13.tar.gz/OBP_reliability_pillar_3-0.0.13/OBP_reliability_pillar_4/__init__.py | from boto3 import session
from OBP_reliability_pillar_4.cloudwatch import cloudwatch
from OBP_reliability_pillar_4.dynamodb import dynamodb
from OBP_reliability_pillar_4.elastic_beanstalk import elastic_beanstalk
from OBP_reliability_pillar_4.elastic_load_balancer import elb
from OBP_reliability_pillar_4.rds import rds
from OBP_reliability_pillar_4.ec2 import ec2
from OBP_reliability_pillar_4.redshift import redshift
from OBP_reliability_pillar_4.s3 import s3
# from OBP_reliability_pillar_4.security_hub import security_hub
from OBP_reliability_pillar_4.auto_scaling import auto_scaling
from OBP_reliability_pillar_4.lambdafn import lambdafn
from OBP_reliability_pillar_4.guard_duty import guard_duty
from OBP_reliability_pillar_4.elastic_search import elastic_search
__version__ = '0.0.13'
__author__ = 'Dheeraj Banodha'
class aws_client(elb, dynamodb, cloudwatch, rds, guard_duty, elastic_search,
                 ec2, s3, elastic_beanstalk, redshift, auto_scaling, lambdafn):
    """Aggregates the per-service reliability compliance checks behind one client."""

    def __init__(self, **kwargs):
        """
        @param str aws_access_key_id: AWS Access Key ID
        @param str aws_secret_access_key: AWS Secret Access Key
        @param str profile_name: name of a locally configured AWS profile
            (alternative to passing explicit keys)
        """
        if 'aws_access_key_id' in kwargs and 'aws_secret_access_key' in kwargs:
            self.session = session.Session(
                aws_access_key_id=kwargs['aws_access_key_id'],
                aws_secret_access_key=kwargs['aws_secret_access_key'],
            )
        elif 'profile_name' in kwargs:
            self.session = session.Session(profile_name=kwargs['profile_name'])
        else:
            # fail fast with a clear message instead of an AttributeError later
            raise ValueError(
                "provide either 'aws_access_key_id' and 'aws_secret_access_key', "
                "or 'profile_name'"
            )
        # Initialize each service mixin on this *instance*. The previous code
        # passed the mixin class itself as `self`, which stored the session on
        # the class object and leaked state between separate clients.
        for service in (elb, dynamodb, cloudwatch, rds, ec2, s3,
                        elastic_beanstalk, redshift, auto_scaling, lambdafn,
                        guard_duty, elastic_search):
            service.__init__(self, self.session)

    # consolidate compliance details
    def get_compliance(self) -> list:
        """
        :return list: consolidated list of compliance checks across all services
        """
        compliance = []
        for checks in (
            self.dynamodb_compliance(),
            self.elb_compliance(),
            self.cloudwatch_compliance(),
            self.rds_compliance(),
            self.ec2_compliance(),
            self.s3_compliance(),
            self.elastic_beanstalk_compliance(),
            self.redshift_compliance(),
            self.auto_scaling_compliance(),
            self.lambda_compliance(),
            self.guard_duty_compliance(),
            self.elastic_search_compliance(),
        ):
            compliance.extend(checks)
        return compliance
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/av/widget/Status.js | if(!dojo._hasResource["dojox.av.widget.Status"]){
dojo._hasResource["dojox.av.widget.Status"]=true;
dojo.provide("dojox.av.widget.Status");
dojo.require("dijit._Widget");
dojo.require("dijit._Templated");
dojo.declare("dojox.av.widget.Status",[dijit._Widget,dijit._Templated],{templateString:dojo.cache("dojox.av.widget","resources/Status.html","<table class=\"Status\">\n <tr>\n <td class=\"Time\"><span dojoAttachPoint=\"timeNode\">0.00</span></td>\n <td class=\"Status\"><div dojoAttachPoint=\"titleNode\">Loading...</div></td>\n <td class=\"Duration\"><span dojoAttachPoint=\"durNode\">0.00</span></td>\n </tr>\n</table>\n"),postCreate:function(){
this.titleNode=dojo.query(".Status",this.domNode);
this.durNode=dojo.query(".Duration",this.domNode);
this.timeNode=dojo.query(".Time",this.domNode);
},setMedia:function(_1){
this.media=_1;
dojo.connect(this.media,"onMetaData",this,function(_2){
this.duration=_2.duration;
this.durNode.innerHTML=this.toSeconds(this.duration);
});
dojo.connect(this.media,"onPosition",this,function(_3){
});
var _4=["onMetaData","onPosition","onStart","onBuffer","onPlay","onPause","onStop","onEnd","onError","onLoad"];
dojo.forEach(_4,function(c){
dojo.connect(this.media,c,this,c);
},this);
},onMetaData:function(_5){
this.duration=_5.duration;
this.durNode.innerHTML=this.toSeconds(this.duration);
if(this.media.title){
this.title=this.media.title;
}else{
var a=this.media.mediaUrl.split("/");
var b=a[a.length-1].split(".")[0];
this.title=b;
}
},onBuffer:function(_6){
this.isBuffering=_6;
console.warn("status onBuffer",this.isBuffering);
if(this.isBuffering){
this.setStatus("buffering...");
}else{
this.setStatus("Playing");
}
},onPosition:function(_7){
},onStart:function(){
this.setStatus("Starting");
},onPlay:function(){
this.setStatus("Playing");
},onPause:function(){
this.setStatus("Paused");
},onStop:function(){
this.setStatus("Stopped");
},onEnd:function(){
this.setStatus("Stopped");
},onError:function(_8){
var _9=_8.info.code;
if(_9=="NetStream.Play.StreamNotFound"){
_9="Stream Not Found";
}
this.setStatus("ERROR: "+_9,true);
},onLoad:function(){
this.setStatus("Loading...");
},setStatus:function(_a,_b){
if(_b){
dojo.addClass(this.titleNode,"statusError");
}else{
dojo.removeClass(this.titleNode,"statusError");
if(this.isBuffering){
_a="buffering...";
}
}
this.titleNode.innerHTML="<span class=\"statusTitle\">"+this.title+"</span> <span class=\"statusInfo\">"+_a+"</span>";
},toSeconds:function(_c){
var ts=_c.toString();
if(ts.indexOf(".")<0){
ts+=".00";
}else{
if(ts.length-ts.indexOf(".")==2){
ts+="0";
}else{
if(ts.length-ts.indexOf(".")>2){
ts=ts.substring(0,ts.indexOf(".")+3);
}
}
}
return ts;
}});
} | PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/flot/plugins/jquery.flot.axislabels.js | (function($) {
"use strict";
var options = {
axisLabels: {
show: true
}
};
function AxisLabel(axisName, position, padding, placeholder, axisLabel, surface) {
this.axisName = axisName;
this.position = position;
this.padding = padding;
this.placeholder = placeholder;
this.axisLabel = axisLabel;
this.surface = surface;
this.width = 0;
this.height = 0;
this.elem = null;
}
AxisLabel.prototype.calculateSize = function() {
var axisId = this.axisName + 'Label',
layerId = axisId + 'Layer',
className = axisId + ' axisLabels';
var info = this.surface.getTextInfo(layerId, this.axisLabel, className);
this.labelWidth = info.width;
this.labelHeight = info.height;
if (this.position === 'left' || this.position === 'right') {
this.width = this.labelHeight + this.padding;
this.height = 0;
} else {
this.width = 0;
this.height = this.labelHeight + this.padding;
}
};
AxisLabel.prototype.transforms = function(degrees, x, y, svgLayer) {
var transforms = [], translate, rotate;
if (x !== 0 || y !== 0) {
translate = svgLayer.createSVGTransform();
translate.setTranslate(x, y);
transforms.push(translate);
}
if (degrees !== 0) {
rotate = svgLayer.createSVGTransform();
var centerX = Math.round(this.labelWidth / 2),
centerY = 0;
rotate.setRotate(degrees, centerX, centerY);
transforms.push(rotate);
}
return transforms;
};
AxisLabel.prototype.calculateOffsets = function(box) {
var offsets = {
x: 0,
y: 0,
degrees: 0
};
if (this.position === 'bottom') {
offsets.x = box.left + box.width / 2 - this.labelWidth / 2;
offsets.y = box.top + box.height - this.labelHeight;
} else if (this.position === 'top') {
offsets.x = box.left + box.width / 2 - this.labelWidth / 2;
offsets.y = box.top;
} else if (this.position === 'left') {
offsets.degrees = -90;
offsets.x = box.left - this.labelWidth / 2;
offsets.y = box.height / 2 + box.top;
} else if (this.position === 'right') {
offsets.degrees = 90;
offsets.x = box.left + box.width - this.labelWidth / 2;
offsets.y = box.height / 2 + box.top;
}
offsets.x = Math.round(offsets.x);
offsets.y = Math.round(offsets.y);
return offsets;
};
AxisLabel.prototype.cleanup = function() {
var axisId = this.axisName + 'Label',
layerId = axisId + 'Layer',
className = axisId + ' axisLabels';
this.surface.removeText(layerId, 0, 0, this.axisLabel, className);
};
AxisLabel.prototype.draw = function(box) {
var axisId = this.axisName + 'Label',
layerId = axisId + 'Layer',
className = axisId + ' axisLabels',
offsets = this.calculateOffsets(box),
style = {
position: 'absolute',
bottom: '',
right: '',
display: 'inline-block',
'white-space': 'nowrap'
};
var layer = this.surface.getSVGLayer(layerId);
var transforms = this.transforms(offsets.degrees, offsets.x, offsets.y, layer.parentNode);
this.surface.addText(layerId, 0, 0, this.axisLabel, className, undefined, undefined, undefined, undefined, transforms);
this.surface.render();
Object.keys(style).forEach(function(key) {
layer.style[key] = style[key];
});
};
function init(plot) {
plot.hooks.processOptions.push(function(plot, options) {
if (!options.axisLabels.show) {
return;
}
var axisLabels = {};
var defaultPadding = 2; // padding between axis and tick labels
plot.hooks.axisReserveSpace.push(function(plot, axis) {
var opts = axis.options;
var axisName = axis.direction + axis.n;
axis.labelHeight += axis.boxPosition.centerY;
axis.labelWidth += axis.boxPosition.centerX;
if (!opts || !opts.axisLabel || !axis.show) {
return;
}
var padding = opts.axisLabelPadding === undefined
? defaultPadding
: opts.axisLabelPadding;
var axisLabel = axisLabels[axisName];
if (!axisLabel) {
axisLabel = new AxisLabel(axisName,
opts.position, padding,
plot.getPlaceholder()[0], opts.axisLabel, plot.getSurface());
axisLabels[axisName] = axisLabel;
}
axisLabel.calculateSize();
// Incrementing the sizes of the tick labels.
axis.labelHeight += axisLabel.height;
axis.labelWidth += axisLabel.width;
});
// TODO - use the drawAxis hook
plot.hooks.draw.push(function(plot, ctx) {
$.each(plot.getAxes(), function(flotAxisName, axis) {
var opts = axis.options;
if (!opts || !opts.axisLabel || !axis.show) {
return;
}
var axisName = axis.direction + axis.n;
axisLabels[axisName].draw(axis.box);
});
});
plot.hooks.shutdown.push(function(plot, eventHolder) {
for (var axisName in axisLabels) {
axisLabels[axisName].cleanup();
}
});
});
};
    // Register this plugin with flot.
    $.plot.plugins.push({
        init: init,
        options: options,
        name: 'axisLabels',
        version: '3.0'
    });
/Dero-0.15.0-py3-none-any.whl/dero/reg/linmodels/reg.py | from dero.reg.linmodels.bindings.modelstr import get_model_class_by_string, _is_ols_str
from dero.reg.linmodels.bindings.input import _create_reg_df_y_x_and_lag_vars
from dero.reg.linmodels.bindings.fit import _estimate_handling_robust_and_cluster
from dero.reg.linmodels.bindings.result import _convert_linearmodels_result_to_statsmodels_result_format
from dero.reg.linmodels.bindings.fe import dummy_cols_dict_from_model, linearmodels_fe_kwarg_dict_from_fe
def linear_reg(df, yvar, xvars, entity_var=None, time_var=None, robust=True, cluster=False, cons=True, fe=None, interaction_tuples=None,
               num_lags=0, lag_variables='xvars', lag_period_var='Date', lag_id_var='TICKER', lag_fill_method: str='ffill',
               lag_fill_limit: int = None,
               model_type='fama macbeth', **fit_kwargs):
    """
    Run a linearmodels panel regression and return the result in a
    statsmodels-compatible format.
    Args:
        df: pandas dataframe containing regression data
        yvar: str, column name of outcome y variable
        xvars: list of strs, column names of x variables for regression
        entity_var: str, name of variable identifying groups for panel (required)
        time_var: str, name of variable identifying time for panel (required)
        robust: bool, set to True to use heteroskedasticity-robust standard errors
        cluster: False or str, set to a column name to calculate standard errors within clusters
            given by unique values of given column name
        cons: bool, set to False to not include a constant in the regression
        fe: None or str or list of strs. If a str or list of strs is passed, uses these categorical
            variables to construct dummies for fixed effects.
        interaction_tuples: tuple or list of tuples of column names to interact and include as xvars
        num_lags: int, Number of periods to lag variables. Setting to other than 0 will activate lags
        lag_variables: 'all', 'xvars', or list of strs of names of columns to lag for regressions.
        lag_period_var: str, only used if lag_variables is not None. name of column which
            contains period variable for lagging
        lag_id_var: str, only used if lag_variables is not None. name of column which
            contains identifier variable for lagging
        lag_fill_method: str, 'ffill' or 'bfill' for which method to use to fill in missing rows when
            creating lag variables. See pandas.DataFrame.fillna for more details
        lag_fill_limit: int, maximum number of periods to go back or forward for filling
        model_type: str, 'fama macbeth' for type of model
        **fit_kwargs: extra keyword arguments passed through to the model's fit method
    Returns:
        linearmodels fit result, augmented with statsmodels-style attributes
        plus ``dummy_cols_dict`` and ``cluster_variables``
    Raises:
        ValueError: if either ``entity_var`` or ``time_var`` is not provided
    """
    if entity_var is None or time_var is None:
        raise ValueError('must pass both entity_var and time_var')
    # build the panel-indexed design matrices, applying fe dummies,
    # interactions and lags as requested
    regdf, y, X, lag_variables = _create_reg_df_y_x_and_lag_vars(
        df, yvar, xvars, entity_var, time_var,
        cluster=cluster,
        cons=cons, fe=fe,
        interaction_tuples=interaction_tuples,
        num_lags=num_lags,
        lag_variables=lag_variables,
        lag_period_var=lag_period_var,
        lag_id_var=lag_id_var,
        fill_method=lag_fill_method,
        fill_limit=lag_fill_limit
    )
    fe_kwargs = linearmodels_fe_kwarg_dict_from_fe(fe, regdf)
    ModelClass = get_model_class_by_string(model_type)
    mod = ModelClass(y, X, **fe_kwargs)
    dummy_cols_dict = dummy_cols_dict_from_model(mod, regdf)
    result = _estimate_handling_robust_and_cluster(regdf, mod, robust, cluster, **fit_kwargs)
    # adapt the linearmodels result so downstream statsmodels-oriented code works
    _convert_linearmodels_result_to_statsmodels_result_format(result)
    result.dummy_cols_dict = dummy_cols_dict
    result.cluster_variables = cluster
    return result
/NaverService-0.0.1.tar.gz/NaverService-0.0.1/README.md |
# Naver Services
##### ============================================================
## Overview
##### ============================================================
##### ============================================================
## IDE :: Platform Layer
##### ============================================================
mkdir project_name
mkdir package_name
git init
python3 -m venv env
source env/bin/activate
pip install --upgrade pip
pip install -r requirements.txt
##### ============================================================
## IDE :: Application Layer
##### ============================================================
### Terminal
python ide.py
### Jupyter
import os
import sys
sys.path.append(f"{os.environ['HOME']}/pjts/PROJECT_NAME")
import ide
ide.main(ide='jupyter')
ide.setup_logpath(modulepath='jupyter.MODULE_NAME')
| PypiClean |
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/core/logging/logtargets/LogTargetScribe.py | from JumpScale.core.enumerators import AppStatusType
from JumpScale import j
from fb303_scripts import *
from scribe import scribe
from thrift.transport import TTransport, TSocket
from thrift.protocol import TBinaryProtocol
import socket
class LogTargetScribe(object):
    """Forwards incoming logRecords to a Scribe server over thrift.

    The target stays disabled until the local scribe service reports
    RUNNING (via j.cmdtools.logclient) and a framed-thrift connection
    can be opened to ``serverip:serverport``.
    """
    def __init__(self, serverip="localhost", serverport=9991):
        self._serverip = serverip
        # Resolve once so __eq__ compares real endpoints, not hostnames.
        self._server_real_ip = socket.gethostbyname(self._serverip)
        self._serverport = serverport
        self._client = None
        self._transport = None
        self.enabled = False
        self.checkTarget()
        self.name = "scribe"
    def checkTarget(self):
        """
        check status of scribe, if accessible return True
        """
        try:
            result = j.cmdtools.logclient.getStatus()
        except AttributeError:
            result = AppStatusType.HALTED
        self.enabled = (result == AppStatusType.RUNNING) and self.open()
        return self.enabled
    def log(self, log):
        """
        Forward the already formatted message to the target destination.

        Returns True when the message was handled (sent, or dropped as
        unparsable), False when no client is available or the send failed.
        """
        # Bug fix: the original referenced an undefined name ``message``
        # instead of the ``log`` argument, raising NameError on every call.
        message = log.replace('/|', '@@')
        try:
            # ``msgtype`` instead of ``type`` to avoid shadowing the builtin.
            msgtype, _, _, level, _, _ = message.split('|')
        except ValueError:
            return True  # invalid message, can't extract type and level
        category = "%s-%s" % (msgtype, level)
        if self._client is None:
            # No connection: report failure before building the log entry.
            return False
        log_entry = scribe.LogEntry(dict(category=category, message=message))
        try:
            self._client.Log(messages=[log_entry])
        except Exception:
            self.enabled = False
            self.close()
            return False
        return True
    def __eq__(self, other):
        # Targets are equal when they point at the same resolved ip:port.
        if not other:
            return False
        if not isinstance(other, LogTargetScribe):
            return False
        return (self._server_real_ip == other._server_real_ip) and (self._serverport == other._serverport)
    def __str__(self):
        """ string representation of a LogTargetScribe """
        return 'LogTargetScribe logging to %s:%d' % (str(self._serverip), self._serverport)
    __repr__ = __str__
    def open(self):
        """
        UDP has no notion of open, we are just preparing the thrift transport
        """
        try:
            if not self._transport:
                # ``sock`` instead of ``socket`` so the module is not shadowed.
                sock = TSocket.TSocket(host=self._serverip, port=self._serverport)
                self._transport = TTransport.TFramedTransport(sock)
                protocol = TBinaryProtocol.TBinaryProtocol(trans=self._transport, strictRead=False, strictWrite=False)
                self._client = scribe.Client(iprot=protocol, oprot=protocol)
            if not self._transport.isOpen():
                self._transport.open()
            return True
        except NameError:
            raise
        except Exception:
            return False
    def close(self):
        # Idempotent: safe to call when the transport was never opened.
        if self._transport:
            self._transport.close()
/Dhelpers-0.1.5rc1.tar.gz/Dhelpers-0.1.5rc1/docs/index.rst | Welcome to Dpowers' documentation!
===================================
Source code:
`<https://github.com/dp0s/Dpowers>`_
Introduction
************
.. include:: intro.rst
Requirements
*************
- python 3.6 or later
- Currently only tested on apt based Linux systems (Debian, Ubuntu, Linux Mint).
.. toctree::
:maxdepth: 2
   :caption: Preparation
preperation
.. toctree::
:maxdepth: 2
:caption: Quickstart
quickstart
.. toctree::
:maxdepth: 2
:glob:
:caption: Reference (still incomplete)
reference/*
Indices and tables
******************
* :ref:`genindex`
| PypiClean |
/Beetstream-1.2.0.tar.gz/Beetstream-1.2.0/beetsplug/beetstream/songs.py | from beetsplug.beetstream.utils import *
from beetsplug.beetstream import app, stream
from flask import g, request, Response
from beets.random import random_objs
import xml.etree.cElementTree as ET
@app.route('/rest/getSong', methods=["GET", "POST"])
@app.route('/rest/getSong.view', methods=["GET", "POST"])
def song():
    """Subsonic ``getSong`` endpoint: return metadata for one song.

    Query params: ``id`` (Subsonic song id), ``f`` (response format,
    defaults to XML).
    """
    res_format = request.values.get('f') or 'xml'
    # ``item_id`` / ``item`` instead of ``id`` / ``song`` so neither the
    # builtin nor this view function is shadowed.
    item_id = int(song_subid_to_beetid(request.values.get('id')))
    item = g.lib.get_item(item_id)
    if is_json(res_format):
        return jsonpify(request, wrap_res("song", map_song(item)))
    else:
        root = get_xml_root()
        s = ET.SubElement(root, 'song')
        map_song_xml(s, item)
        return Response(xml_to_string(root), mimetype='text/xml')
@app.route('/rest/getSongsByGenre', methods=["GET", "POST"])
@app.route('/rest/getSongsByGenre.view', methods=["GET", "POST"])
def songs_by_genre():
    """Subsonic ``getSongsByGenre`` endpoint: page through a genre's songs.

    Query params: ``genre`` (required), ``count`` (page size, default 10),
    ``offset`` (default 0), ``f`` (response format, defaults to XML).
    """
    res_format = request.values.get('f') or 'xml'
    genre = request.values.get('genre')
    count = int(request.values.get('count') or 10)
    offset = int(request.values.get('offset') or 0)
    # Escape single quotes so the genre survives the beets query parser.
    songs = handleSizeAndOffset(list(g.lib.items('genre:' + genre.replace("'", "\\'"))), count, offset)
    if is_json(res_format):
        return jsonpify(request, wrap_res("songsByGenre", {
            "song": list(map(map_song, songs))
        }))
    else:
        root = get_xml_root()
        # Renamed from ``songs_by_genre`` so the local no longer shadows
        # this view function.
        genre_elem = ET.SubElement(root, 'songsByGenre')
        for song in songs:
            s = ET.SubElement(genre_elem, 'song')
            map_song_xml(s, song)
        return Response(xml_to_string(root), mimetype='text/xml')
@app.route('/rest/stream', methods=["GET", "POST"])
@app.route('/rest/stream.view', methods=["GET", "POST"])
def stream_song():
    """Subsonic ``stream`` endpoint: stream a song, transcoding if needed.

    Sends the raw file when transcoding is disabled, raw was explicitly
    requested, no positive ``maxBitRate`` is given, or the file already
    fits the requested bitrate; otherwise tries to transcode down.
    """
    max_bitrate = int(request.values.get('maxBitRate') or 0)
    # ``fmt`` / ``item_id`` instead of ``format`` / ``id`` to avoid
    # shadowing the builtins.
    fmt = request.values.get('format')
    item_id = int(song_subid_to_beetid(request.values.get('id')))
    item = g.lib.get_item(item_id)
    item_path = item.path.decode('utf-8')
    # item.bitrate appears to be in bits/s while maxBitRate is kbit/s,
    # hence the * 1000 — TODO confirm against beets' field docs.
    if app.config['never_transcode'] or fmt == 'raw' or max_bitrate <= 0 or item.bitrate <= max_bitrate * 1000:
        return stream.send_raw_file(item_path)
    else:
        return stream.try_to_transcode(item_path, max_bitrate)
@app.route('/rest/download', methods=["GET", "POST"])
@app.route('/rest/download.view', methods=["GET", "POST"])
def download_song():
    """Subsonic ``download`` endpoint: send the original file unmodified."""
    # ``item_id`` instead of ``id`` to avoid shadowing the builtin.
    item_id = int(song_subid_to_beetid(request.values.get('id')))
    item = g.lib.get_item(item_id)
    return stream.send_raw_file(item.path.decode('utf-8'))
@app.route('/rest/getRandomSongs', methods=["GET", "POST"])
@app.route('/rest/getRandomSongs.view', methods=["GET", "POST"])
def random_songs():
    """Subsonic ``getRandomSongs`` endpoint: return ``size`` random songs.

    Query params: ``size`` (default 10), ``f`` (format, defaults to XML).
    """
    res_format = request.values.get('f') or 'xml'
    size = int(request.values.get('size') or 10)
    songs = list(g.lib.items())
    # -1 presumably disables time-based weighting in beets' random_objs —
    # TODO confirm against beets.random docs.
    songs = random_objs(songs, -1, size)
    if is_json(res_format):
        return jsonpify(request, wrap_res("randomSongs", {
            "song": list(map(map_song, songs))
        }))
    else:
        root = get_xml_root()
        # Renamed from the misleading ``album``: this holds the songs list.
        songs_elem = ET.SubElement(root, 'randomSongs')
        for song in songs:
            s = ET.SubElement(songs_elem, 'song')
            map_song_xml(s, song)
        return Response(xml_to_string(root), mimetype='text/xml')
# TODO link with Last.fm or ListenBrainz
@app.route('/rest/getTopSongs', methods=["GET", "POST"])
@app.route('/rest/getTopSongs.view', methods=["GET", "POST"])
def top_songs():
    """Subsonic ``getTopSongs`` endpoint (stub: always an empty result)."""
    wants_json = is_json(request.values.get('f') or 'xml')
    if not wants_json:
        root = get_xml_root()
        ET.SubElement(root, 'topSongs')
        return Response(xml_to_string(root), mimetype='text/xml')
    return jsonpify(request, wrap_res("topSongs", {}))
@app.route('/rest/getStarred', methods=["GET", "POST"])
@app.route('/rest/getStarred.view', methods=["GET", "POST"])
def starred_songs():
    """Subsonic ``getStarred`` endpoint (stub: no starred songs yet)."""
    wants_json = is_json(request.values.get('f') or 'xml')
    if wants_json:
        return jsonpify(request, wrap_res("starred", {"song": []}))
    root = get_xml_root()
    ET.SubElement(root, 'starred')
    return Response(xml_to_string(root), mimetype='text/xml')
@app.route('/rest/getStarred2', methods=["GET", "POST"])
@app.route('/rest/getStarred2.view', methods=["GET", "POST"])
def starred2_songs():
    """Subsonic ``getStarred2`` endpoint (stub: no starred songs yet)."""
    wants_json = is_json(request.values.get('f') or 'xml')
    if wants_json:
        return jsonpify(request, wrap_res("starred2", {"song": []}))
    root = get_xml_root()
    ET.SubElement(root, 'starred2')
    return Response(xml_to_string(root), mimetype='text/xml')
/BabitMF_GPU-0.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/bmf/python_modules/Module_rotate_gpu/rotate_gpu.py | import re
from math import pi, cos, tan, sqrt
import numpy
from bmf import *
import bmf.hml.hmp as hmp
import cv2, cvcuda
class rotate_gpu(Module):
    """BMF module: rotate video frames on the GPU via cvcuda.warp_affine.

    Options (all optional):
      - ``angle``: rotation as a Python expression in radians (e.g. "pi/2")
      - ``angle_deg``: rotation in degrees (applied after ``angle``)
      - ``center``: rotation center "x,y" (also accepts '*', 'x', ':' separators)
      - ``scale``: isotropic scale factor (default 1)
      - ``algo``: interpolation, one of area/cubic/linear/nearest
    """
    def __get_algo(self, algo_str):
        # Map an option string to a CVCUDA interpolation mode; default LINEAR.
        return {
            'area': cvcuda.Interp.AREA,
            'cubic': cvcuda.Interp.CUBIC,
            'linear': cvcuda.Interp.LINEAR,
            'nearest': cvcuda.Interp.NEAREST
        }.get(algo_str, cvcuda.Interp.LINEAR)
    def __get_rotation(self, option):
        # Parse rotation parameters from the option dict into attributes.
        self.angle_deg = 0
        # self.shift = None
        self.center = None
        self.scale = 1
        self.algo = cvcuda.Interp.LINEAR
        if 'angle' in option.keys():
            # NOTE(review): eval of an option string — only safe for trusted
            # graph configs; never feed user-controlled input here.
            self.angle_deg = eval(option['angle'].lower()) * 180 / pi
        if 'angle_deg' in option.keys():
            self.angle_deg = option['angle_deg']
        if 'center' in option.keys():
            split = re.split('\*|x|,|:', option['center'])
            self.center = (int(split[0]), int(split[1]))
        if 'scale' in option.keys():
            self.scale = option['scale']
        if 'algo' in option.keys():
            self.algo = self.__get_algo(option['algo'])
    def __init__(self, node, option=None):
        self.node_ = node
        self.option_ = option
        # Set when the EOF packet is seen; triggers EOF propagation in process().
        self.eof_received_ = False
        self.__get_rotation(option)
        # Intermediate planar pixel descriptions used to convert the
        # semi-planar formats (NV12 / P010LE) before warping.
        self.i420info = hmp.PixelInfo(hmp.PixelFormat.kPF_YUV420P,
                                      hmp.ColorSpace.kCS_BT470BG,
                                      hmp.ColorRange.kCR_MPEG)
        self.u420info = hmp.PixelInfo(hmp.PixelFormat.kPF_YUV420P10LE,
                                      hmp.ColorSpace.kCS_BT2020_CL,
                                      hmp.ColorRange.kCR_MPEG)
        self.i420_out = None
        # Which intermediate planar format each semi-planar input maps to.
        self.pinfo_map = {
            hmp.PixelFormat.kPF_NV12: self.i420info,
            hmp.PixelFormat.kPF_P010LE: self.u420info
        }
        # One angle per plane slot (4 entries), as a CVCUDA tensor.
        anglet = hmp.ones(
            (4, ), dtype=hmp.kFloat64, device='cuda') * self.angle_deg
        self.cvangle = cvcuda.as_tensor(anglet)
    def process(self, task):
        """Drain queued input frames, rotate each on the GPU, emit results."""
        # get input and output packet queue
        input_queue = task.get_inputs()[0]
        output_queue = task.get_outputs()[0]
        # add all input frames into frame cache
        while not input_queue.empty():
            in_pkt = input_queue.get()
            if in_pkt.timestamp == Timestamp.EOF:
                # we should done all frames processing in following loop
                self.eof_received_ = True
                continue
            in_frame = in_pkt.get(VideoFrame)
            if (in_frame.frame().device() == hmp.Device('cpu')):
                # Upload CPU frames so CVCUDA can operate on them.
                in_frame = in_frame.cuda()
            tensor_list = in_frame.frame().data()
            # Output frame with the same size/format as the input.
            frame_out = hmp.Frame(in_frame.width,
                                  in_frame.height,
                                  in_frame.frame().pix_info(),
                                  device='cuda')
            out_list = frame_out.data()
            stream = hmp.current_stream(hmp.kCUDA)
            cvstream = cvcuda.cuda.as_stream(stream.handle())
            if (self.center is None):
                # NOTE(review): this tuple is immediately overwritten by the
                # ndarray below, and a configured self.center is never used —
                # rotation is always about the frame center. Looks like a bug;
                # left unchanged here.
                center = (in_frame.width // 2, in_frame.height // 2)
            # One (x, y) center per plane slot (4 rows).
            center = numpy.ones((4, 2), dtype=int) * numpy.array(
                (in_frame.width // 2, in_frame.height // 2), dtype=int)
            # deal with nv12 special case
            if (in_frame.frame().format() == hmp.PixelFormat.kPF_NV12 or
                    in_frame.frame().format() == hmp.PixelFormat.kPF_P010LE):
                cvimg_batch = cvcuda.ImageBatchVarShape(3)
                cvimg_batch_out = cvcuda.ImageBatchVarShape(3)
                # Chroma planes are half resolution in 4:2:0 — halve their centers.
                center[1:3] //= 2
                # One 2x3 affine matrix (flattened) per plane.
                xform = numpy.zeros((4, 6), dtype='float32')
                xform[:] = [
                    cv2.getRotationMatrix2D(
                        c.astype(float), self.angle_deg,
                        self.scale).astype('float32').flatten() for c in center
                ]
                cvxform = cvcuda.as_tensor(hmp.from_numpy(xform).cuda())
                pinfo = self.pinfo_map[in_frame.frame().format()]
                # Convert semi-planar input to planar, warp, convert back.
                in_420 = hmp.Frame(in_frame.width,
                                   in_frame.height,
                                   pinfo,
                                   device='cuda')
                out_420 = hmp.Frame(in_frame.width,
                                    in_frame.height,
                                    pinfo,
                                    device='cuda')
                hmp.img.yuv_to_yuv(in_420.data(),
                                   in_frame.frame().data(), pinfo,
                                   in_frame.frame().pix_info())
                in_list = in_420.data()
                out_list = out_420.data()
                # Border fill: black luma + neutral chroma (8- vs 10-bit range).
                fill = None
                if in_frame.frame().format() == hmp.PixelFormat.kPF_NV12:
                    fill = numpy.array((0, 127, 127))
                else:
                    fill = numpy.array((0, 511, 511))
                out_list[0].fill_(int(fill[0]))
                out_list[1].fill_(int(fill[1]))
                out_list[2].fill_(int(fill[2]))
                cvimg_batch.pushback([cvcuda.as_image(x) for x in in_list])
                cvimg_batch_out.pushback(
                    [cvcuda.as_image(x) for x in out_list])
                cvcuda.warp_affine_into(cvimg_batch_out,
                                        cvimg_batch,
                                        xform=cvxform,
                                        flags=self.algo,
                                        border_mode=cvcuda.Border.CONSTANT,
                                        border_value=fill.astype('float32'),
                                        stream=cvstream)
                # Convert the warped planar frame back to the original format.
                hmp.img.yuv_to_yuv(frame_out.data(), out_420.data(),
                                   frame_out.pix_info(), out_420.pix_info())
            # other pixel formats, e.g. yuv420, rgb
            else:
                cvimg_batch = cvcuda.ImageBatchVarShape(
                    in_frame.frame().nplanes())
                cvimg_batch_out = cvcuda.ImageBatchVarShape(
                    in_frame.frame().nplanes())
                # t3 = torch.ones((in_frame.frame().nplanes(),), dtype=torch.double, device='cuda') * self.flip_code
                if (in_frame.frame().format() == hmp.PixelFormat.kPF_YUV420P
                        or in_frame.frame().format()
                        == hmp.PixelFormat.kPF_YUV420P10):
                    # 4:2:0 chroma planes are half resolution.
                    center[1:3] //= 2
                xform = numpy.zeros((4, 6), dtype='float32')
                xform[:] = [
                    cv2.getRotationMatrix2D(
                        c.astype(float), self.angle_deg,
                        self.scale).astype('float32').flatten() for c in center
                ]
                cvxform = cvcuda.as_tensor(hmp.from_numpy(xform).cuda())
                fill = numpy.array((0, 127, 127))
                if in_frame.frame().format() == hmp.PixelFormat.kPF_YUV420P10:
                    fill = numpy.array((0, 511, 511))
                for t, f in zip(tensor_list, out_list):
                    cvimg = cvcuda.as_image(t)
                    cvimg_out = cvcuda.as_image(f)
                    cvimg_batch.pushback(cvimg)
                    cvimg_batch_out.pushback(cvimg_out)
                cvcuda.warp_affine_into(cvimg_batch_out,
                                        cvimg_batch,
                                        xform=cvxform,
                                        flags=self.algo,
                                        border_mode=cvcuda.Border.CONSTANT,
                                        border_value=fill.astype('float32'),
                                        stream=cvstream)
            # Preserve timing metadata on the rotated frame.
            videoframe_out = VideoFrame(frame_out)
            videoframe_out.pts = in_frame.pts
            videoframe_out.time_base = in_frame.time_base
            out_pkt = Packet(videoframe_out)
            out_pkt.timestamp = videoframe_out.pts
            output_queue.put(out_pkt)
        if self.eof_received_:
            # Propagate EOF downstream and mark the task finished.
            output_queue.put(Packet.generate_eof_packet())
            Log.log_node(LogLevel.DEBUG, self.node_, 'output stream', 'done')
            task.set_timestamp(Timestamp.DONE)
        return ProcessResult.OK
return ProcessResult.OK
def register_rotate_gpu_info(info):
    """Populate BMF registry metadata for the rotate_gpu module."""
    info.module_tag = ModuleTag.TAG_DEVICE_HWACCEL
    info.module_description = "Builtin module for rotate using GPU"
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/aproba/README.md | aproba
======
A ridiculously light-weight function argument validator
```
var validate = require("aproba")
function myfunc(a, b, c) {
// `a` must be a string, `b` a number, `c` a function
validate('SNF', arguments) // [a,b,c] is also valid
}
myfunc('test', 23, function () {}) // ok
myfunc(123, 23, function () {}) // type error
myfunc('test', 23) // missing arg error
myfunc('test', 23, function () {}, true) // too many args error
```
Valid types are:
| type | description
| :--: | :----------
| * | matches any type
| A | `Array.isArray` OR an `arguments` object
| S | typeof == string
| N | typeof == number
| F | typeof == function
| O | typeof == object and not type A and not type E
| B | typeof == boolean
| E | `instanceof Error` OR `null` **(special: see below)**
| Z | == `null`
Validation failures throw one of three exception types, distinguished by a
`code` property of `EMISSINGARG`, `EINVALIDTYPE` or `ETOOMANYARGS`.
If you pass in an invalid type then it will throw with a code of
`EUNKNOWNTYPE`.
If an **error** argument is found and is not null then the remaining
arguments are optional. That is, if you say `ESO` then that's like using a
non-magical `E` in: `E|ESO|ZSO`.
### But I have optional arguments?!
You can provide more than one signature by separating them with pipes `|`.
If any signature matches the arguments then they'll be considered valid.
So for example, say you wanted to write a signature for
`fs.createWriteStream`. The docs for it describe it thusly:
```
fs.createWriteStream(path[, options])
```
This would be a signature of `SO|S`. That is, a string and an object, or
just a string.
Now, if you read the full `fs` docs, you'll see that actually path can ALSO
be a buffer. And options can be a string, that is:
```
path <String> | <Buffer>
options <String> | <Object>
```
To reproduce this you have to fully enumerate all of the possible
combinations and that implies a signature of `SO|SS|OO|OS|S|O`. The
awkwardness is a feature: It reminds you of the complexity you're adding to
your API when you do this sort of thing.
### Browser support
This has no dependencies and should work in browsers, though you'll have
noisier stack traces.
### Why this exists
I wanted a very simple argument validator. It needed to do two things:
1. Be more concise and easier to use than assertions
2. Not encourage an infinite bikeshed of DSLs
This is why types are specified by a single character and there's no such
thing as an optional argument.
This is not intended to validate user data. This is specifically about
asserting the interface of your functions.
If you need greater validation, I encourage you to write them by hand or
look elsewhere.
| PypiClean |
/EvidentialToolBus-1.0.1.tar.gz/EvidentialToolBus-1.0.1/etb/wrappers/builtins.py | from __future__ import division
import uuid
import time
import logging
import subprocess
from etb import terms
from etb.wrapper import Tool, Substitutions, Success, Failure, Errors
class Builtins(Tool):
    """Some builtin predicates for the ETB datalog engine.

    Each method decorated with @Tool.predicate implements one predicate;
    '+' arguments are inputs, '-' arguments are outputs to be bound.

    >>> b = Builtins()
    >>> b.different(terms.mk_term('a'), terms.mk_term('b'))
    [subst()]
    >>> b.different(terms.mk_term('a'), terms.mk_term('a'))
    []
    """
    @Tool.sync
    @Tool.volatile
    @Tool.predicate('+a: value, +b: value')
    def different(self, a, b):
        """Are the two terms different?"""
        if a != b:
            return Success(self)
        else:
            return Failure(self)
    @Tool.sync
    @Tool.volatile
    @Tool.predicate('a: value, b: value')
    def equal(self, a, b):
        """Unify the two terms; fails when both are unbound variables."""
        if b.is_var():
            if a.is_var():
                # Two unbound variables cannot be grounded here.
                return Failure(self)
            subst = a.unify(b)
            return Failure(self) if subst is None else Substitutions(self, [subst])
        if a.is_var():
            subst = b.unify(a)
            return Failure(self) if subst is None else Substitutions(self, [subst])
        elif a.val == b.val:
            return Success(self)
        else:
            return Failure(self)
    @Tool.sync
    @Tool.volatile
    @Tool.predicate('a: value, b: value, sum: value')
    def plus(self, a, b, sum):
        """Like Prolog sum; at most one argument may be a variable,
        if none are checks whether sum=a+b, else binds the variable
        accordingly.
        """
        if (a.is_var() and (b.is_var() or sum.is_var())) or (b.is_var() and sum.is_var()):
            return Errors(self, ["Only one variable allowed in plus."])
        if ((not (a.is_var() or a.is_numconst()))
            or (not (b.is_var() or b.is_numconst()))
            or (not (sum.is_var() or sum.is_numconst()))):
            return Errors(self, ["plus expects numbers"])
        if a.is_var():
            return Substitutions(self, [self.bindResult(a, sum.num - b.num)])
        elif b.is_var():
            return Substitutions(self, [self.bindResult(b, sum.num - a.num)])
        elif sum.is_var():
            return Substitutions(self, [self.bindResult(sum, a.num + b.num)])
        else:
            res = sum.num == a.num + b.num
            return Success(self) if res else Failure(self)
    @Tool.sync
    @Tool.volatile
    @Tool.predicate('a: value, b: value, sum: value')
    def times(self, a, b, prod):
        """Like Prolog times; at most one argument may be a variable,
        if none are checks whether prod=a*b, else binds the variable
        accordingly.
        """
        if (a.is_var() and (b.is_var() or prod.is_var())) or (b.is_var() and prod.is_var()):
            return Errors(self, ["Only one variable allowed in times."])
        if ((not (a.is_var() or a.is_numconst()))
            or (not (b.is_var() or b.is_numconst()))
            or (not (prod.is_var() or prod.is_numconst()))):
            return Errors(self, ["times expects numbers"])
        if a.is_var():
            if b.num == 0:
                return Errors(self, ["times: divide by 0 not allowed"])
            else:
                return Substitutions(self, [self.bindResult(a, prod.num / b.num)])
        elif b.is_var():
            if a.num == 0:
                return Errors(self, ["times: divide by 0 not allowed"])
            else:
                return Substitutions(self, [self.bindResult(b, prod.num / a.num)])
        elif prod.is_var():
            return Substitutions(self, [self.bindResult(prod, a.num * b.num)])
        else:
            res = prod.num == a.num * b.num
            return Success(self) if res else Failure(self)
    @Tool.predicate("-v: value")
    def nil(self, v):
        """Bind v to the empty list"""
        if v.is_var():
            return Substitutions(self, [ self.bindResult(v, terms.mk_term([])) ])
        else:
            return Errors(self, [ "nil passed a non variable: %s" % v ])
    @Tool.predicate("+head: value, +tail: value, -out: value")
    def cons(self, head, tail, out):
        """Create the cons of head to tail bound to variable out"""
        if tail.is_const() and tail.val is None:
            # NOTE(review): this branch wraps ``head`` itself while the other
            # uses ``head.val`` — confirm the asymmetry is intentional.
            res = [head]
        else:
            res = [head.val] + list(tail.get_args())
        return Substitutions(self, [ self.bindResult(out, res)])
    @Tool.sync
    @Tool.volatile
    @Tool.predicate('-token: Value')
    def new(self, tok):
        """Bind the argument to a fresh, unique symbol."""
        if tok.is_var():
            return Substitutions(self, [{tok: terms.StringConst(uuid.uuid4())}])
        else:
            return Failure(self)  # always fails with a const argument
    @Tool.volatile
    @Tool.predicate('-token: Value')
    def now(self, tok):
        """Binds the argument to the current unix timestamp
        (of this computer)"""
        if tok.is_var():
            return Substitutions(self, [{tok: terms.StringConst(time.time())}])
        else:
            return Failure(self)
    @Tool.volatile
    @Tool.predicate('+cmd: Value, -result: Value')
    def popen(self, cmd, result):
        """Runs a shell command and get the (text) result back."""
        if not cmd.is_array():
            return Failure(self)  # TODO error claims
        # NOTE: ``unicode`` is Python-2 only; this wrapper predates Python 3.
        cmd = list(unicode(x) for x in cmd.get_args())
        try:
            shell_res = subprocess.check_output(cmd)
            return Substitutions(self, [{result: terms.StringConst(shell_res)}])
        except subprocess.CalledProcessError as e:
            return Failure(self)  # TODO error claims
    @Tool.predicate('+cmd: Value, +timestamp: Value, -result: Value')
    def popen_at(self, cmd, timestamp, result):
        """Runs a shell command and get the (text) result back. The timestamp
        can be anything, its purpose is to repeat an action that would
        otherwise be cached (like several printf of the same string)
        """
        if not cmd.is_array():
            return Failure(self)  # TODO error claims
        cmd = list(unicode(x) for x in cmd.get_args())
        try:
            shell_res = subprocess.check_output(cmd)
            return Substitutions(self, [{result: terms.StringConst(shell_res)}])
        except subprocess.CalledProcessError as e:
            return Failure(self)  # TODO error claims
    @Tool.sync
    @Tool.volatile
    @Tool.predicate('+goal: value, -facts: value')
    def match_facts(self, goal, facts):
        """Put in facts the sorted list of facts that match goal."""
        # Bug fix: removed two leftover Python-2 debug ``print`` statements
        # that polluted stdout on every call.
        # get the actual list of facts (sorted)
        _, goal = goal.negative_rename()  # avoid collisions
        with self._etb.logic_state:
            found_facts = list(subst(goal) for subst in
                               self._etb.logic_state.match_facts_against(goal))
        found_facts.sort()
        found_facts = terms.Array(found_facts)
        # bind/check
        if facts.is_var():
            return Substitutions(self, [{ facts: found_facts}])
        elif facts == found_facts:
            return Success(self)
        else:
            return Failure(self)
    @Tool.sync
    @Tool.predicate('+inf: file, +outname: value, -outf: file')
    def remove_cr(self, inf, outname, outf):
        """Copy a file, normalizing line endings (strips trailing CR/LF)."""
        with open(inf['file']) as infh:
            with open(outname.val, 'wb') as outfh:
                for line in infh:
                    line = line.rstrip()
                    outfh.write(line + '\n')
        outfref = self.fs.put_file(outname.val)
        return Substitutions(self, [ self.bindResult(outf, outfref) ])
def register(etb):
    """ETB plugin hook: instantiate Builtins and register it with the bus."""
    etb.add_tool(Builtins(etb))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.