id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/AstroCabTools-1.5.1.tar.gz/AstroCabTools-1.5.1/astrocabtools/mrs_subviz/src/utils/ellipse_xy_transformations.py | from photutils import EllipticalAperture, aperture_photometry
import math
__all__=["transform_xy_ellipse", "transform_ellipse_subband", "transform_ellipse_subband_from_coord"]
def transform_xy_ellipse(centerX, centerY, aAxis, bAxis, cubeModel):
    """ Integrate the flux inside an elliptical aperture on every slice of the cube.

    :param float centerX: center x coordinate (1-based pixel convention)
    :param float centerY: center y coordinate (1-based pixel convention)
    :param float aAxis: long (major) axis length, in pixels
    :param float bAxis: short (minor) axis length, in pixels
    :param object cubeModel: current data with standarized structure of the cube
    :return: flux values, wavelength values and the aperture object
    """
    fValues = []
    wValues = []
    #Because it gets all the flux on a pixel, it needs to get the area of it rather
    #the sum of it
    pixelArea = (cubeModel.meta.wcsinfo.cdelt1 * 3600.) * (cubeModel.meta.wcsinfo.cdelt2 * 3600.)
    # photutils positions are 0-based while the GUI coordinates are 1-based
    position = [(centerX-1, centerY-1)]
    aperture = EllipticalAperture(position,aAxis/2, bAxis/2)
    d2w = cubeModel.meta.wcs.get_transform('detector', 'world')
    for i in range(cubeModel.weightmap.shape[0]):
        phot_table = aperture_photometry(cubeModel.data[i], aperture, method= 'exact')
        fValues.append(phot_table['aperture_sum'][0]*pixelArea)
        # The wavelength only depends on the slice index, so any (x, y) pixel
        # works here -- (1, 1) is used as an arbitrary probe point.
        ra, dec, wavelength = d2w(1,1,i)
        wValues.append(wavelength)
    return fValues, wValues, aperture
def transform_ellipse_subband(from_model, to_model, patchesData, lambdaCube):
    """
    Transform the figure coordinates from one cube to other
    :param object from_model: initial cube
    :param object to_model: cube where the data is gonna be transformed
    :param dict patchesData: coordinates of the figure
    :param int lambdaCube: lambda value to be used in the transformation
    :return: dictionary with the new coordinates
    """
    d2w = from_model.meta.wcs.get_transform('detector', 'world')
    w2d = to_model.meta.wcs.get_transform('world', 'detector')

    # Capture the source-cube geometry before patchesData gets overwritten.
    cx = patchesData['centerX']
    cy = patchesData['centerY']
    semiA = patchesData['aAxis'] / 2.
    semiB = patchesData['bAxis'] / 2.

    def _to_target_pixels(xPix, yPix):
        # detector -> world on the source cube, then world -> detector on the
        # destination cube; the wavelength component is discarded.
        ra, dec, wavelength = d2w(xPix, yPix, lambdaCube)
        x, y, _ = w2d(ra, dec, wavelength)
        return x, y

    patchesData['centerX'], patchesData['centerY'] = _to_target_pixels(cx, cy)
    leftAAxis = _to_target_pixels(abs(semiA - cx), cy)
    rightAAxis = _to_target_pixels(semiA + cx, cy)
    topBAxis = _to_target_pixels(cx, semiB + cy)
    bottomBAxis = _to_target_pixels(cx, abs(semiB - cy))

    # The new axis lengths are the spans between opposite extreme points.
    patchesData['aAxis'] = abs(rightAAxis[0] - leftAAxis[0])
    patchesData['bAxis'] = abs(topBAxis[1] - bottomBAxis[1])
    return patchesData
def transform_ellipse_subband_from_coord(from_model, patchesData, wavelengthValue):
    """
    Transform the figure coordinates from RA and DEC to pixel of the same cube
    :param object from_model: cube where the data is gonna be transformed
    :param dict patchesData: coordinates of the figure
    :param float wavelengthValue: wavelength value to be used in the transformation
    :return: dictionary with pixel coordinates
    """
    w2d = from_model.meta.wcs.get_transform('world', 'detector')

    # Capture the world-coordinate geometry before patchesData is overwritten.
    cx = patchesData['centerX']
    cy = patchesData['centerY']
    semiA = patchesData['aAxis'] / 2.
    semiB = patchesData['bAxis'] / 2.

    def _to_pixels(xWorld, yWorld):
        # world -> detector; the wavelength component is discarded.
        x, y, _ = w2d(xWorld, yWorld, wavelengthValue)
        return x, y

    patchesData['centerX'], patchesData['centerY'] = _to_pixels(cx, cy)
    leftAAxis = _to_pixels(abs(semiA - cx), cy)
    rightAAxis = _to_pixels(semiA + cx, cy)
    topBAxis = _to_pixels(cx, semiB + cy)
    bottomBAxis = _to_pixels(cx, abs(semiB - cy))

    # The new axis lengths are the spans between opposite extreme points.
    patchesData['aAxis'] = abs(rightAAxis[0] - leftAAxis[0])
    patchesData['bAxis'] = abs(topBAxis[1] - bottomBAxis[1])
    return patchesData
/Exegol-4.2.5.tar.gz/Exegol-4.2.5/exegol/config/UserConfig.py | from pathlib import Path
from typing import List
from exegol.config.ConstantConfig import ConstantConfig
from exegol.console.ConsoleFormat import boolFormatter
from exegol.utils.DataFileUtils import DataFileUtils
from exegol.utils.MetaSingleton import MetaSingleton
class UserConfig(DataFileUtils, metaclass=MetaSingleton):
    """This class allows loading user defined configurations"""

    # Static choices: the only values accepted for the corresponding config
    # keys (enforced in _process_data via the choices= argument).
    start_shell_options = {'zsh', 'bash', 'tmux'}
    shell_logging_method_options = {'script', 'asciinema'}

    def __init__(self):
        # Defaults User config
        self.private_volume_path: Path = ConstantConfig.exegol_config_path / "workspaces"
        self.my_resources_path: Path = ConstantConfig.exegol_config_path / "my-resources"
        self.exegol_resources_path: Path = self.__default_resource_location('exegol-resources')
        self.auto_check_updates: bool = True
        self.auto_remove_images: bool = True
        self.auto_update_workspace_fs: bool = False
        self.default_start_shell: str = "zsh"
        self.shell_logging_method: str = "asciinema"
        self.shell_logging_compress: bool = True
        # DataFileUtils handles loading/creating the config file and ends up
        # calling _process_data() / _build_file_content() defined below.
        super().__init__("config.yml", "yml")

    def _build_file_content(self):
        """Build the default YAML document written when no config file exists yet."""
        # NOTE(review): the YAML nesting below assumes 4-space indentation was
        # used in the original template -- the indentation was lost in this
        # copy of the source; confirm against a shipped config.yml.
        config = f"""# Exegol configuration
# Volume path can be changed at any time but existing containers will not be affected by the update
volumes:
    # The my-resources volume is a storage space dedicated to the user to customize his environment and tools. This volume can be shared across all exegol containers.
    # Attention! The permissions of this folder (and subfolders) will be updated to share read/write rights between the host (user) and the container (root). Do not modify this path to a folder on which the permissions (chmod) should not be modified.
    my_resources_path: {self.my_resources_path}
    # Exegol resources are data and static tools downloaded in addition to docker images. These tools are complementary and are accessible directly from the host.
    exegol_resources_path: {self.exegol_resources_path}
    # When containers do not have an explicitly declared workspace, a dedicated folder will be created at this location to share the workspace with the host but also to save the data after deleting the container
    private_workspace_path: {self.private_volume_path}
config:
    # Enables automatic check for wrapper updates
    auto_check_update: {self.auto_check_updates}
    # Automatically remove outdated image when they are no longer used
    auto_remove_image: {self.auto_remove_images}
    # Automatically modifies the permissions of folders and sub-folders in your workspace by default to enable file sharing between the container with your host user.
    auto_update_workspace_fs: {self.auto_update_workspace_fs}
    # Default shell command to start
    default_start_shell: {self.default_start_shell}
    # Change the configuration of the shell logging functionality
    shell_logging:
        #Choice of the method used to record the sessions (script or asciinema)
        logging_method: {self.shell_logging_method}
        # Enable automatic compression of log files (with gzip)
        enable_log_compression: {self.shell_logging_compress}
"""
        # TODO handle default image selection
        # TODO handle default start container
        # TODO add custom build profiles path
        return config

    @staticmethod
    def __default_resource_location(folder_name: str) -> Path:
        """Return the default path of a resource folder, depending on whether
        Exegol runs from a git clone or from a pip installation."""
        local_src = ConstantConfig.src_root_path_obj / folder_name
        if local_src.is_dir():
            # If exegol is clone from github, exegol-resources submodule is accessible from root src
            return local_src
        else:
            # Default path for pip installation
            return ConstantConfig.exegol_config_path / folder_name

    def _process_data(self):
        """Load values parsed from the YAML file (self._raw_data) into attributes,
        keeping the defaults set in __init__ for any missing key."""
        # Volume section
        volumes_data = self._raw_data.get("volumes", {})
        # Catch existing but empty section
        if volumes_data is None:
            volumes_data = {}
        self.my_resources_path = self._load_config_path(volumes_data, 'my_resources_path', self.my_resources_path)
        self.private_volume_path = self._load_config_path(volumes_data, 'private_workspace_path', self.private_volume_path)
        self.exegol_resources_path = self._load_config_path(volumes_data, 'exegol_resources_path', self.exegol_resources_path)
        # Config section
        config_data = self._raw_data.get("config", {})
        # Catch existing but empty section
        if config_data is None:
            config_data = {}
        self.auto_check_updates = self._load_config_bool(config_data, 'auto_check_update', self.auto_check_updates)
        self.auto_remove_images = self._load_config_bool(config_data, 'auto_remove_image', self.auto_remove_images)
        self.auto_update_workspace_fs = self._load_config_bool(config_data, 'auto_update_workspace_fs', self.auto_update_workspace_fs)
        self.default_start_shell = self._load_config_str(config_data, 'default_start_shell', self.default_start_shell, choices=self.start_shell_options)
        # Shell_logging section
        shell_logging_data = config_data.get("shell_logging", {})
        self.shell_logging_method = self._load_config_str(shell_logging_data, 'logging_method', self.shell_logging_method, choices=self.shell_logging_method_options)
        self.shell_logging_compress = self._load_config_bool(shell_logging_data, 'enable_log_compression', self.shell_logging_compress)

    def get_configs(self) -> List[str]:
        """Return a list of formatted lines describing every loaded option."""
        configs = [
            f"User config file: [magenta]{self._file_path}[/magenta]",
            f"Private workspace: [magenta]{self.private_volume_path}[/magenta]",
            f"Exegol resources: [magenta]{self.exegol_resources_path}[/magenta]",
            f"My resources: [magenta]{self.my_resources_path}[/magenta]",
            f"Auto-check updates: {boolFormatter(self.auto_check_updates)}",
            f"Auto-remove images: {boolFormatter(self.auto_remove_images)}",
            f"Auto-update fs: {boolFormatter(self.auto_update_workspace_fs)}",
            f"Default start shell: [blue]{self.default_start_shell}[/blue]",
            f"Shell logging method: [blue]{self.shell_logging_method}[/blue]",
            f"Shell logging compression: {boolFormatter(self.shell_logging_compress)}",
        ]
        # TUI can't be called from here to avoid circular importation
        return configs
/Anthrax-0.1.0.tar.bz2/Anthrax-0.1.0/src/anthrax/field/text.py | import unicodedata
import re
import abc
from anthrax.field.base import Field
from anthrax import widget as w
from gettext import gettext as _
from anthrax.exc import ValidationError, MissingData
class TextField(Field):
    """Simple field that represents a string.

    Inherits the ``regexp``, ``min_len`` and ``max_len`` arguments
    available on the base ``Field`` class.
    """

    widgets = [w.TextInput, w.LongTextInput]

    def to_python(self, value, bf):
        # Pure pass-through kept so subclasses have an explicit hook point.
        return super().to_python(value, bf)

    def from_python(self, value, bf):
        # Pure pass-through kept so subclasses have an explicit hook point.
        return super().from_python(value, bf)
class EmailField(TextField):
    """Like TextField, but checks input to be correct email."""

    # Deliberately simple, lowercase-only pattern: dot-separated local part,
    # "@", dot-separated domain.  NOTE(review): this rejects uppercase and
    # "+"-tagged addresses -- confirm that is intended.
    regexp = r'^([a-z0-9-]+\.)*[a-z0-9-]+@([a-z0-9-]+\.)*[a-z0-9-]+$'
    regexp_message = _('Valid E-mail address required')
class MirrorField(TextField):
    """A text-field which content somehow depends on another field.

    WARNING: Current implementation limits its use to a single container.
    """

    # Field (or field name) whose value this field mirrors; a string name is
    # resolved to the actual field object in bind().
    mirrored = None

    # NOTE: abc.abstractproperty is deprecated since Python 3.3 in favour of
    # @property + @abc.abstractmethod; kept for backward compatibility.
    @abc.abstractproperty
    def force_mirror(self):
        """Whether the mirroring is mandatory or just a suggestion."""

    @abc.abstractmethod
    def mirror_filter(self, mirrored):
        """The method that will be called to perform the transformation."""

    def to_python(self, value, bf):
        value = super(MirrorField, self).to_python(value, bf)
        if value:
            if self.force_mirror:
                # Fixed typo in the user-facing message: "file" -> "field".
                raise ValidationError('This field should be left blank!')
            else:
                return value
        else:
            # No manual value: derive one from the mirrored field.
            if bf.bound_mirrored.value is None:
                raise MissingData(bf.bound_mirrored.name)
            value = self.mirror_filter(bf.bound_mirrored)
            return value

    def bind(self, container):
        if self.mirrored is None:
            raise TypeError('No field set to mirror!')
        if isinstance(self.mirrored, str):
            # Resolve the mirrored field by name within the container.
            self.mirrored = container.__fields__[self.mirrored]
        bf = super(MirrorField, self).bind(container)
        bf.bound_mirrored = self.mirrored.bind(container)
        return bf
class SlugField(MirrorField):
    """A field that contains only lowercase letters, numbers and minus signs.

    The value can be entered manually or can reflect another field.  The
    frontend can also provide a dynamic suggestion for the value.
    """

    force_mirror = False

    def mirror_filter(self, mirrored):
        """Turn the mirrored field's value into an ASCII slug."""
        value = mirrored.value
        # NFKD decomposition does not map the Polish 'ł' to an ASCII letter,
        # so handle it explicitly before normalizing.
        value = value.replace('ł', 'l')
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
        value = value.decode('ascii')
        # Raw string: '\W+' in a plain literal is an invalid escape sequence
        # (SyntaxWarning on modern Python).
        return re.sub(r'\W+', '-', value.lower())
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/inginious/frontend/task_factory.py | from os.path import splitext
from inginious.common.filesystems import FileSystemProvider
from inginious.common.log import get_course_logger
from inginious.common.base import id_checker, get_json_or_yaml
from inginious.common.task_file_readers.yaml_reader import TaskYAMLFileReader
from inginious.common.exceptions import InvalidNameException, TaskNotFoundException, \
TaskUnreadableException, TaskReaderNotFoundException, TaskAlreadyExistsException
from inginious.frontend.tasks import Task
class TaskFactory(object):
    """ Load tasks from disk, with an in-memory cache keyed by (course, task) """

    def __init__(self, filesystem: FileSystemProvider, plugin_manager, task_problem_types):
        self._filesystem = filesystem
        self._plugin_manager = plugin_manager
        # (courseid, taskid) -> (Task object, {filename: last modification time})
        self._cache = {}
        # file extension -> task file reader
        self._task_file_managers = {}
        self._task_problem_types = task_problem_types
        self.add_custom_task_file_manager(TaskYAMLFileReader())

    def add_problem_type(self, problem_type):
        """
        :param problem_type: Problem class
        """
        self._task_problem_types.update({problem_type.get_type(): problem_type})

    def get_task(self, course, taskid):
        """
        :param course: a Course object
        :param taskid: the task id of the task
        :raise: InvalidNameException, TaskNotFoundException, TaskUnreadableException
        :return: an object representing the task, of the type given in the constructor
        """
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        if self._cache_update_needed(course, taskid):
            self._update_cache(course, taskid)
        return self._cache[(course.get_id(), taskid)][0]

    def get_task_descriptor_content(self, courseid, taskid):
        """
        :param courseid: the course id of the course
        :param taskid: the task id of the task
        :raise: InvalidNameException, TaskNotFoundException, TaskUnreadableException
        :return: the content of the task descriptor, as a dict
        """
        if not id_checker(courseid):
            raise InvalidNameException("Course with invalid name: " + courseid)
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        descriptor_path, descriptor_manager = self._get_task_descriptor_info(courseid, taskid)
        try:
            task_content = descriptor_manager.load(self.get_task_fs(courseid, taskid).get(descriptor_path))
        except Exception as e:
            raise TaskUnreadableException(str(e)) from e
        return task_content

    def get_task_descriptor_extension(self, courseid, taskid):
        """
        :param courseid: the course id of the course
        :param taskid: the task id of the task
        :raise: InvalidNameException, TaskNotFoundException
        :return: the current extension of the task descriptor
        """
        if not id_checker(courseid):
            raise InvalidNameException("Course with invalid name: " + courseid)
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        descriptor_path = self._get_task_descriptor_info(courseid, taskid)[0]
        return splitext(descriptor_path)[1]

    def get_task_fs(self, courseid, taskid):
        """
        :param courseid: the course id of the course
        :param taskid: the task id of the task
        :raise: InvalidNameException
        :return: A FileSystemProvider to the folder containing the task files
        """
        if not id_checker(courseid):
            raise InvalidNameException("Course with invalid name: " + courseid)
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        return self._filesystem.from_subfolder(courseid).from_subfolder(taskid)

    def update_task_descriptor_content(self, courseid, taskid, content, force_extension=None):
        """
        Update the task descriptor with the dict in content
        :param courseid: the course id of the course
        :param taskid: the task id of the task
        :param content: the content to put in the task file
        :param force_extension: If None, save it the same format. Else, save with the given extension
        :raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
        """
        if not id_checker(courseid):
            raise InvalidNameException("Course with invalid name: " + courseid)
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        if force_extension is None:
            path_to_descriptor, descriptor_manager = self._get_task_descriptor_info(courseid, taskid)
        elif force_extension in self.get_available_task_file_extensions():
            path_to_descriptor = "task." + force_extension
            descriptor_manager = self._task_file_managers[force_extension]
        else:
            raise TaskReaderNotFoundException()
        try:
            self.get_task_fs(courseid, taskid).put(path_to_descriptor, descriptor_manager.dump(content))
        except Exception as e:
            # Narrowed from a bare except: a bare clause would also swallow
            # KeyboardInterrupt/SystemExit.
            raise TaskNotFoundException() from e

    def get_readable_tasks(self, course):
        """ Returns the list of all available tasks in a course """
        course_fs = self._filesystem.from_subfolder(course.get_id())
        tasks = [
            task[:-1]  # remove trailing /
            for task in course_fs.list(folders=True, files=False, recursive=False)
            if self._task_file_exists(course_fs.from_subfolder(task))]
        return tasks

    def _task_file_exists(self, task_fs):
        """ Returns true if a task file exists in this directory """
        for filename in ["task.{}".format(ext) for ext in self.get_available_task_file_extensions()]:
            if task_fs.exists(filename):
                return True
        return False

    def delete_all_possible_task_files(self, courseid, taskid):
        """ Deletes all possible task files in directory, to allow to change the format """
        if not id_checker(courseid):
            raise InvalidNameException("Course with invalid name: " + courseid)
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        task_fs = self.get_task_fs(courseid, taskid)
        for ext in self.get_available_task_file_extensions():
            try:
                task_fs.delete("task."+ext)
            except Exception:
                # Best-effort: the file for this extension may simply not exist.
                pass

    def get_all_tasks(self, course):
        """
        :return: a table containing taskid=>Task pairs
        """
        tasks = self.get_readable_tasks(course)
        output = {}
        for task in tasks:
            try:
                output[task] = self.get_task(course, task)
            except Exception:
                # Best-effort: skip unreadable/broken tasks instead of failing
                # the whole listing.
                pass
        return output

    def _get_task_descriptor_info(self, courseid, taskid):
        """
        :param courseid: the course id of the course
        :param taskid: the task id of the task
        :raise InvalidNameException, TaskNotFoundException
        :return: a tuple, containing:
            (descriptor filename,
             task file manager for the descriptor)
        """
        if not id_checker(courseid):
            raise InvalidNameException("Course with invalid name: " + courseid)
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        task_fs = self.get_task_fs(courseid, taskid)
        for ext, task_file_manager in self._task_file_managers.items():
            if task_fs.exists("task."+ext):
                return "task." + ext, task_file_manager
        raise TaskNotFoundException()

    def add_custom_task_file_manager(self, task_file_manager):
        """ Add a custom task file manager """
        self._task_file_managers[task_file_manager.get_ext()] = task_file_manager

    def get_available_task_file_extensions(self):
        """ Get a list of all the extensions possible for task descriptors """
        return list(self._task_file_managers.keys())

    def _cache_update_needed(self, course, taskid):
        """
        :param course: a Course object
        :param taskid: a (valid) task id
        :raise InvalidNameException, TaskNotFoundException
        :return: True if an update of the cache is needed, False else
        """
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        task_fs = self.get_task_fs(course.get_id(), taskid)
        if (course.get_id(), taskid) not in self._cache:
            return True
        try:
            last_update, __ = self._get_last_updates(course, taskid, task_fs, False)
        except Exception as e:
            raise TaskNotFoundException() from e
        last_modif = self._cache[(course.get_id(), taskid)][1]
        for filename, mftime in last_update.items():
            if filename not in last_modif or last_modif[filename] < mftime:
                return True
        return False

    def _get_last_updates(self, course, taskid, task_fs, need_content=False):
        """ Collect the last modification times of the descriptor and the
        translation files; optionally also load and return the descriptor. """
        descriptor_name, descriptor_reader = self._get_task_descriptor_info(course.get_id(), taskid)
        last_update = {descriptor_name: task_fs.get_last_modification_time(descriptor_name)}
        # Look for the $i18n folder in the task, then in the course common
        # folders, from the most specific location to the most general one.
        translations_fs = task_fs.from_subfolder("$i18n")
        if not translations_fs.exists():
            translations_fs = task_fs.from_subfolder("student").from_subfolder("$i18n")
        if not translations_fs.exists():
            translations_fs = course.get_fs().from_subfolder("$common").from_subfolder("$i18n")
        if not translations_fs.exists():
            translations_fs = course.get_fs().from_subfolder("$common").from_subfolder("student").from_subfolder(
                "$i18n")
        if not translations_fs.exists():
            translations_fs = course.get_fs().from_subfolder("$i18n")
        if translations_fs.exists():
            for f in translations_fs.list(folders=False, files=True, recursive=False):
                lang = f[0:len(f) - 3]
                if translations_fs.exists(lang + ".mo"):
                    last_update["$i18n/" + lang + ".mo"] = translations_fs.get_last_modification_time(lang + ".mo")
        if need_content:
            try:
                task_content = descriptor_reader.load(task_fs.get(descriptor_name))
            except Exception as e:
                raise TaskUnreadableException(str(e)) from e
            return last_update, task_content
        else:
            return last_update, None

    def _update_cache(self, course, taskid):
        """
        Updates the cache
        :param course: a Course object
        :param taskid: a (valid) task id
        :raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
        """
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        task_fs = self.get_task_fs(course.get_id(), taskid)
        last_modif, task_content = self._get_last_updates(course, taskid, task_fs, True)
        self._cache[(course.get_id(), taskid)] = (
            Task(course, taskid, task_content, self._filesystem, self._plugin_manager, self._task_problem_types),
            last_modif
        )

    def update_cache_for_course(self, courseid):
        """
        Clean/update the cache of all the tasks for a given course (id)
        :param courseid:
        """
        to_drop = []
        for (cid, tid) in self._cache:
            if cid == courseid:
                to_drop.append(tid)
        for tid in to_drop:
            del self._cache[(courseid, tid)]

    def create_task(self, course, taskid, init_content):
        """ Create a new course folder and set initial descriptor content, folder can already exist
        :param course: a Course object
        :param taskid: the task id of the task
        :param init_content: initial descriptor content
        :raise: InvalidNameException or TaskAlreadyExistsException
        """
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        task_fs = self.get_task_fs(course.get_id(), taskid)
        task_fs.ensure_exists()
        if task_fs.exists("task.yaml"):
            raise TaskAlreadyExistsException("Task with id " + taskid + " already exists.")
        else:
            task_fs.put("task.yaml", get_json_or_yaml("task.yaml", init_content))
        get_course_logger(course.get_id()).info("Task %s created in the factory.", taskid)

    def delete_task(self, courseid, taskid):
        """ Erase the content of the task folder
        :param courseid: the course id of the course
        :param taskid: the task id of the task
        :raise: InvalidNameException or CourseNotFoundException
        """
        if not id_checker(courseid):
            raise InvalidNameException("Course with invalid name: " + courseid)
        if not id_checker(taskid):
            raise InvalidNameException("Task with invalid name: " + taskid)
        task_fs = self.get_task_fs(courseid, taskid)
        if task_fs.exists():
            task_fs.delete()
            get_course_logger(courseid).info("Task %s erased from the factory.", taskid)

    def get_problem_types(self):
        """
        Returns the supported problem types by this task factory
        """
        return self._task_problem_types
/ElectroMagneticPython-2.1.4-py3-none-any.whl/EMpy/RCWA.py | __author__ = "Lorenzo Bolla"
import scipy as S
from scipy.linalg import toeplitz, inv, eig, solve as linsolve
from numpy import pi
from EMpy.utils import (
cond,
warning,
BinaryGrating,
SymmetricDoubleGrating,
AsymmetricDoubleGrating,
)
def dispersion_relation_ordinary(kx, ky, k, nO):
    """Dispersion relation for the ordinary wave.

    Given the transverse wavevector components ``kx`` and ``ky`` (arrays of
    the same shape), the free-space wavevector modulus ``k`` and the ordinary
    refractive index ``nO``, return the longitudinal component ``kz``.

    NOTE
    See eq. 15 in Glytsis, "Three-dimensional (vector) rigorous
    coupled-wave analysis of anisotropic grating diffraction",
    JOSA A, 7(8), 1990.  The result always has a positive real part
    or a negative imaginary part.
    """
    if kx.shape != ky.shape:
        raise ValueError("kx and ky must have the same length")

    kz = S.sqrt((k * nO) ** 2 - (kx**2 + ky**2))

    # Select the propagating (+Re) / decaying (-Im) branch of the square root.
    kz.real = abs(kz.real)
    kz.imag = -abs(kz.imag)

    return kz
def dispersion_relation_extraordinary(kx, ky, k, nO, nE, c):
    """Dispersion relation for the extraordinary wave.

    ``c`` is the 3-component optic-axis direction; ``nO`` and ``nE`` are the
    ordinary and extraordinary refractive indices.

    NOTE
    See eq. 16 in Glytsis, "Three-dimensional (vector) rigorous
    coupled-wave analysis of anisotropic grating diffraction",
    JOSA A, 7(8), 1990.  The result always has a positive real part
    or a negative imaginary part.
    """
    if kx.shape != ky.shape or c.size != 3:
        raise ValueError(
            "kx and ky must have the same length and c must have 3 components"
        )

    kz = S.empty_like(kx)
    # Loop-invariant anisotropy term.
    aniso = nE**2 - nO**2
    for idx in range(kx.size):
        proj = kx[idx] / k * c[0] + ky[idx] / k * c[1]
        # Quadratic polynomial in kz / k.
        poly = S.array(
            [
                nO**2 + c[2] ** 2 * aniso,
                2.0 * c[2] * proj * aniso,
                nO**2 * (kx[idx] ** 2 + ky[idx] ** 2) / k**2
                + aniso * proj**2
                - nO**2 * nE**2,
            ]
        )
        # The two roots come as a +x/-x pair, purely real or purely imaginary.
        candidates = k * S.roots(poly)
        # Keep the positive real root, or the negative imaginary one.
        if S.any(S.isreal(candidates)):
            kz[idx] = S.absolute(candidates[0])
        else:
            kz[idx] = -1j * S.absolute(candidates[0])
    return kz
class RCWA:
    """Base class shared by the RCWA solvers.

    NOTE
    See Glytsis, "Three-dimensional (vector) rigorous coupled-wave
    analysis of anisotropic grating diffraction", JOSA A, 7(8), 1990

    The following variables, used throughout the code, have the following
    meaning:

    alpha: float
        angle between wave vector k1 and xy plane, in radians
    delta: float
        angle between the y axis and the projection of k1 onto the xy
        plane, in radians
    psi: angle between the D vector of the plane wave and the xy plane,
        in radians, TM: 0, TE: numpy.pi / 2
    phi: angle between the grating vector K and y axis (in the xy plane),
        in radians, the grating is modulated in the direction of the
        grating vector.
    """

    def __init__(self, multilayer, alpha, delta, psi, phi, n):
        """Store the multilayer, the angles of incidence and the number of
        diffraction orders retained in the computation.

        INPUT
        multilayer = Multilayer obj describing the sequence of layers.
        alpha, delta, psi, phi = angles of the incident wave (in radiant).
        n = orders of diffractions to retain in the computation.
        """
        self.setMultilayer(multilayer)
        self.LAMBDA = self.get_pitch()
        self.alpha = alpha
        self.delta = delta
        self.psi = psi
        self.phi = phi
        self.n = n

    def setMultilayer(self, m):
        """Simplify the multilayer before storing it."""
        self.multilayer = m.simplify()

    def get_pitch(self):
        """Check that every grating in the multilayer has the same pitch and
        return it; raise ValueError otherwise."""
        grating_types = (BinaryGrating, SymmetricDoubleGrating, AsymmetricDoubleGrating)
        grating_positions = [
            pos
            for pos, layer in enumerate(self.multilayer)
            if isinstance(layer, grating_types)
        ]
        if not grating_positions:
            # No grating at all: a plain transfer matrix would suffice and
            # any pitch value will do.
            return 1.0
        pitches = S.asarray([self.multilayer[pos].pitch for pos in grating_positions])
        if not S.all(pitches == pitches[0]):
            raise ValueError("All the BinaryGratings must have the same pitch.")
        return pitches[0]
class IsotropicRCWA(RCWA):
    """Isotropic RCWA solver."""

    def solve(self, wls):
        """Isotropic solver.

        INPUT
        wls = wavelengths to scan (any asarray-able object).

        OUTPUT
        self.DE1, self.DE3 = power reflected and transmitted.

        NOTE
        see:
        Moharam, "Formulation for stable and efficient implementation
        of the rigorous coupled-wave analysis of binary gratings",
        JOSA A, 12(5), 1995
        Lalanne, "Highly improved convergence of the coupled-wave
        method for TM polarization", JOSA A, 13(4), 1996
        Moharam, "Stable implementation of the rigorous coupled-wave
        analysis for surface-relief gratings: enhanced trasmittance
        matrix approach", JOSA A, 12(5), 1995
        """

        self.wls = S.atleast_1d(wls)

        LAMBDA = self.LAMBDA
        n = self.n
        multilayer = self.multilayer
        alpha = self.alpha
        delta = self.delta
        psi = self.psi
        phi = self.phi

        nlayers = len(multilayer)

        # Retained diffraction orders: -n ... +n.
        i = S.arange(-n, n + 1)
        nood = 2 * n + 1
        hmax = nood - 1

        # grating vector (on the xz plane)
        # grating on the xy plane
        K = 2 * pi / LAMBDA * S.array([S.sin(phi), 0.0, S.cos(phi)], dtype=complex)

        DE1 = S.zeros((nood, self.wls.size))
        DE3 = S.zeros_like(DE1)

        # Unit direction of the incident wavevector.
        dirk1 = S.array(
            [S.sin(alpha) * S.cos(delta), S.sin(alpha) * S.sin(delta), S.cos(alpha)]
        )

        # usefull matrices
        I = S.eye(i.size)
        I2 = S.eye(i.size * 2)
        ZERO = S.zeros_like(I)

        X = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
        MTp1 = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
        MTp2 = S.zeros_like(MTp1)

        EPS2 = S.zeros(2 * hmax + 1, dtype=complex)
        EPS21 = S.zeros_like(EPS2)

        # Kronecker delta on the zeroth order.
        dlt = (i == 0).astype(int)

        for iwl, wl in enumerate(self.wls):
            # free space wavevector
            k = 2 * pi / wl

            n1 = multilayer[0].mat.n(wl).item()
            n3 = multilayer[-1].mat.n(wl).item()

            # incident plane wave wavevector
            k1 = k * n1 * dirk1

            # all the other wavevectors
            tmp_x = k1[0] - i * K[0]
            tmp_y = k1[1] * S.ones_like(i)
            tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n1)
            k1i = S.r_[[tmp_x], [tmp_y], [tmp_z]]

            # k2i = S.r_[[k1[0] - i*K[0]], [k1[1] - i * K[1]], [-i * K[2]]]

            tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n3)
            k3i = S.r_[[k1i[0, :]], [k1i[1, :]], [tmp_z]]

            # aliases for constant wavevectors
            kx = k1i[0, :]
            ky = k1[1]

            # angles of reflection
            # phi_i = S.arctan2(ky,kx)
            phi_i = S.arctan2(ky, kx.real)  # OKKIO

            Kx = S.diag(kx / k)
            Ky = ky / k * I
            Z1 = S.diag(k1i[2, :] / (k * n1**2))
            Y1 = S.diag(k1i[2, :] / k)
            Z3 = S.diag(k3i[2, :] / (k * n3**2))
            Y3 = S.diag(k3i[2, :] / k)
            # Fc = S.diag(S.cos(phi_i))
            fc = S.cos(phi_i)
            # Fs = S.diag(S.sin(phi_i))
            fs = S.sin(phi_i)

            MR = S.asarray(
                S.bmat([[I, ZERO], [-1j * Y1, ZERO], [ZERO, I], [ZERO, -1j * Z1]])
            )

            MT = S.asarray(
                S.bmat([[I, ZERO], [1j * Y3, ZERO], [ZERO, I], [ZERO, 1j * Z3]])
            )

            # internal layers (grating or layer)
            X.fill(0.0)
            MTp1.fill(0.0)
            MTp2.fill(0.0)
            for nlayer in range(nlayers - 2, 0, -1):  # internal layers
                layer = multilayer[nlayer]
                d = layer.thickness

                EPS2, EPS21 = layer.getEPSFourierCoeffs(wl, n, anisotropic=False)

                E = toeplitz(EPS2[hmax::-1], EPS2[hmax:])
                E1 = toeplitz(EPS21[hmax::-1], EPS21[hmax:])
                E11 = inv(E1)
                # B = S.dot(Kx, linsolve(E,Kx)) - I
                B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                # A = S.dot(Kx, Kx) - E
                A = S.diag((kx / k) ** 2) - E

                # NOTE: for certain kx the Helmholtz operator has two null
                # eigenvalues and A, B are not invertible.  As a dirty
                # workaround, kx is slightly randomized until cond(A) is
                # small (< 1e10).  These degenerate cases should really be
                # handled analytically.
                if cond(A) > 1e10:
                    warning("BAD CONDITIONING: randomization of kx")
                    while cond(A) > 1e10:
                        Kx = Kx * (1 + 1e-9 * S.rand())
                        B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                        A = S.diag((kx / k) ** 2) - E

                if S.absolute(K[2] / k) > 1e-10:
                    raise ValueError(
                        "First Order Helmholtz Operator not implemented, yet!"
                    )
                elif ky == 0 or S.allclose(S.diag(Ky / ky * k), 1):
                    # lalanne
                    # H_U_reduced = S.dot(Ky, Ky) + A
                    H_U_reduced = (ky / k) ** 2 * I + A
                    # H_S_reduced = S.dot(Ky, Ky) + S.dot(Kx, linsolve(E, S.dot(Kx, E11))) - E11
                    H_S_reduced = (
                        (ky / k) ** 2 * I
                        + kx[:, S.newaxis] / k * linsolve(E, kx[:, S.newaxis] / k * E11)
                        - E11
                    )

                    q1, W1 = eig(H_U_reduced)
                    q1 = S.sqrt(q1)
                    q2, W2 = eig(H_S_reduced)
                    q2 = S.sqrt(q2)

                    # boundary conditions
                    # V11 = S.dot(linsolve(A, W1), S.diag(q1))
                    V11 = linsolve(A, W1) * q1[S.newaxis, :]
                    V12 = (ky / k) * S.dot(linsolve(A, Kx), W2)
                    V21 = (ky / k) * S.dot(linsolve(B, Kx), linsolve(E, W1))
                    # V22 = S.dot(linsolve(B, W2), S.diag(q2))
                    V22 = linsolve(B, W2) * q2[S.newaxis, :]

                    # Vss = S.dot(Fc, V11)
                    Vss = fc[:, S.newaxis] * V11
                    # Wss = S.dot(Fc, W1) + S.dot(Fs, V21)
                    Wss = fc[:, S.newaxis] * W1 + fs[:, S.newaxis] * V21
                    # Vsp = S.dot(Fc, V12) - S.dot(Fs, W2)
                    Vsp = fc[:, S.newaxis] * V12 - fs[:, S.newaxis] * W2
                    # Wsp = S.dot(Fs, V22)
                    Wsp = fs[:, S.newaxis] * V22
                    # Wpp = S.dot(Fc, V22)
                    Wpp = fc[:, S.newaxis] * V22
                    # Vpp = S.dot(Fc, W2) + S.dot(Fs, V12)
                    Vpp = fc[:, S.newaxis] * W2 + fs[:, S.newaxis] * V12
                    # Wps = S.dot(Fc, V21) - S.dot(Fs, W1)
                    Wps = fc[:, S.newaxis] * V21 - fs[:, S.newaxis] * W1
                    # Vps = S.dot(Fs, V11)
                    Vps = fs[:, S.newaxis] * V11

                    Mc2bar = S.asarray(
                        S.bmat(
                            [
                                [Vss, Vsp, Vss, Vsp],
                                [Wss, Wsp, -Wss, -Wsp],
                                [Wps, Wpp, -Wps, -Wpp],
                                [Vps, Vpp, Vps, Vpp],
                            ]
                        )
                    )

                    x = S.r_[S.exp(-k * q1 * d), S.exp(-k * q2 * d)]

                    # Mc1 = S.dot(Mc2bar, S.diag(S.r_[S.ones_like(x), x]))
                    xx = S.r_[S.ones_like(x), x]
                    Mc1 = Mc2bar * xx[S.newaxis, :]

                    X[:, :, nlayer] = S.diag(x)

                    MTp = linsolve(Mc2bar, MT)
                    MTp1[:, :, nlayer] = MTp[0 : 2 * nood, :]
                    MTp2 = MTp[2 * nood :, :]

                    MT = S.dot(
                        Mc1,
                        S.r_[
                            I2,
                            S.dot(MTp2, linsolve(MTp1[:, :, nlayer], X[:, :, nlayer])),
                        ],
                    )
                else:
                    # BUGFIX: the exception was constructed but never raised,
                    # silently skipping unsupported layers.
                    raise ValueError(
                        "Second Order Helmholtz Operator not implemented, yet!"
                    )

            # M = S.asarray(S.bmat([-MR, MT]))
            M = S.c_[-MR, MT]
            b = S.r_[
                S.sin(psi) * dlt,
                1j * S.sin(psi) * n1 * S.cos(alpha) * dlt,
                -1j * S.cos(psi) * n1 * dlt,
                S.cos(psi) * S.cos(alpha) * dlt,
            ]

            x = linsolve(M, b)
            R, T = S.split(x, 2)
            Rs, Rp = S.split(R, 2)

            # Propagate the transmitted amplitudes through the internal layers.
            for ii in range(1, nlayers - 1):
                T = S.dot(linsolve(MTp1[:, :, ii], X[:, :, ii]), T)
            Ts, Tp = S.split(T, 2)

            # Diffraction efficiencies (power normalized to the incident wave).
            DE1[:, iwl] = (k1i[2, :] / (k1[2])).real * S.absolute(Rs) ** 2 + (
                k1i[2, :] / (k1[2] * n1**2)
            ).real * S.absolute(Rp) ** 2
            DE3[:, iwl] = (k3i[2, :] / (k1[2])).real * S.absolute(Ts) ** 2 + (
                k3i[2, :] / (k1[2] * n3**2)
            ).real * S.absolute(Tp) ** 2

        # save the results
        self.DE1 = DE1
        self.DE3 = DE3

        return self

    def __str__(self):
        return (
            "ISOTROPIC RCWA SOLVER\n\n%s\n\nLAMBDA = %g\nalpha = %g\ndelta = %g\npsi = %g\nphi = %g\nn = %d"
            % (
                self.multilayer.__str__(),
                self.LAMBDA,
                self.alpha,
                self.delta,
                self.psi,
                self.phi,
                self.n,
            )
        )
class AnisotropicRCWA(RCWA):
    """Rigorous Coupled Wave Analysis solver for anisotropic multilayers.

    Handles full 3x3 permittivity tensors in the internal layers and
    decomposes the outer (incidence and exit) media into ordinary and
    extraordinary waves, following the rigorous formulation referenced
    below as "glytsis_rigorous".
    """
    def solve(self, wls):
        """Run the anisotropic RCWA scan over the given wavelengths.

        INPUT
        wls = wavelengths to scan (any asarray-able object).

        OUTPUT
        self.DEO1, self.DEE1, self.DEO3, self.DEE3 = diffraction
        efficiencies (Ordinary/Extraordinary waves, reflected in
        medium "1" and transmitted into medium "3"); each array has
        one row per retained order and one column per wavelength.

        Returns self, allowing call chaining.
        """
        self.wls = S.atleast_1d(wls)
        LAMBDA = self.LAMBDA
        n = self.n
        multilayer = self.multilayer
        alpha = self.alpha
        delta = self.delta
        psi = self.psi
        phi = self.phi
        nlayers = len(multilayer)
        # retained diffraction orders: -n ... +n, i.e. nood = 2n+1 of them
        i = S.arange(-n, n + 1)
        nood = 2 * n + 1
        hmax = nood - 1
        DEO1 = S.zeros((nood, self.wls.size))
        DEO3 = S.zeros_like(DEO1)
        DEE1 = S.zeros_like(DEO1)
        DEE3 = S.zeros_like(DEO1)
        # optic-axis direction of the outer media (hard-coded along x here)
        c1 = S.array([1.0, 0.0, 0.0])
        c3 = S.array([1.0, 0.0, 0.0])
        # grating on the xy plane
        K = 2 * pi / LAMBDA * S.array([S.sin(phi), 0.0, S.cos(phi)], dtype=complex)
        dirk1 = S.array(
            [S.sin(alpha) * S.cos(delta), S.sin(alpha) * S.sin(delta), S.cos(alpha)]
        )
        # D polarization vector
        u = S.array(
            [
                S.cos(psi) * S.cos(alpha) * S.cos(delta) - S.sin(psi) * S.sin(delta),
                S.cos(psi) * S.cos(alpha) * S.sin(delta) + S.sin(psi) * S.cos(delta),
                -S.cos(psi) * S.sin(alpha),
            ]
        )
        # per-order wavevectors: rows are (x, y, z) components
        kO1i = S.zeros((3, i.size), dtype=complex)
        kE1i = S.zeros_like(kO1i)
        kO3i = S.zeros_like(kO1i)
        kE3i = S.zeros_like(kO1i)
        Mp = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex)
        M = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex)
        # Kronecker delta: 1 at the zeroth diffraction order, 0 elsewhere
        dlt = (i == 0).astype(int)
        for iwl, wl in enumerate(self.wls):
            # outer media are isotropic here, so ordinary == extraordinary index
            nO1 = nE1 = multilayer[0].mat.n(wl).item()
            nO3 = nE3 = multilayer[-1].mat.n(wl).item()
            # wavevectors
            k = 2 * pi / wl
            eps1 = S.diag(S.asarray([nE1, nO1, nO1]) ** 2)
            eps3 = S.diag(S.asarray([nE3, nO3, nO3]) ** 2)
            # ordinary wave
            abskO1 = k * nO1
            # abskO3 = k * nO3
            # extraordinary wave
            # abskE1 = k * nO1 *nE1 / S.sqrt(nO1**2 + (nE1**2 - nO1**2) * S.dot(-c1, dirk1)**2)
            # abskE3 = k * nO3 *nE3 / S.sqrt(nO3**2 + (nE3**2 - nO3**2) * S.dot(-c3, dirk1)**2)
            k1 = abskO1 * dirk1
            kO1i[0, :] = k1[0] - i * K[0]
            kO1i[1, :] = k1[1] * S.ones_like(i)
            kO1i[2, :] = -dispersion_relation_ordinary(kO1i[0, :], kO1i[1, :], k, nO1)
            kE1i[0, :] = kO1i[0, :]
            kE1i[1, :] = kO1i[1, :]
            kE1i[2, :] = -dispersion_relation_extraordinary(
                kE1i[0, :], kE1i[1, :], k, nO1, nE1, c1
            )
            kO3i[0, :] = kO1i[0, :]
            kO3i[1, :] = kO1i[1, :]
            kO3i[2, :] = dispersion_relation_ordinary(kO3i[0, :], kO3i[1, :], k, nO3)
            kE3i[0, :] = kO1i[0, :]
            kE3i[1, :] = kO1i[1, :]
            kE3i[2, :] = dispersion_relation_extraordinary(
                kE3i[0, :], kE3i[1, :], k, nO3, nE3, c3
            )
            # k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [k1[2] - i * K[2]]]
            k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [-i * K[2]]]
            # aliases for constant wavevectors
            kx = kO1i[0, :]  # or kE1i[0, :], they are the same
            ky = k1[1]
            # matrices
            I = S.eye(nood, dtype=complex)
            ZERO = S.zeros((nood, nood), dtype=complex)
            Kx = S.diag(kx / k)
            Ky = ky / k * I
            Kz = S.diag(k2i[2, :] / k)
            KO1z = S.diag(kO1i[2, :] / k)
            KE1z = S.diag(kE1i[2, :] / k)
            KO3z = S.diag(kO3i[2, :] / k)
            KE3z = S.diag(kE3i[2, :] / k)
            # boundary-matching matrices for the reflected (R*) and
            # transmitted (T*) Ordinary/Extraordinary waves
            ARO = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KO1z * eps1[2, 0]
            BRO = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KO1z * eps1[2, 1]
            CRO_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KO1z * eps1[2, 2])
            ARE = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KE1z * eps1[2, 0]
            BRE = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KE1z * eps1[2, 1]
            CRE_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KE1z * eps1[2, 2])
            ATO = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KO3z * eps3[2, 0]
            BTO = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KO3z * eps3[2, 1]
            CTO_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KO3z * eps3[2, 2])
            ATE = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KE3z * eps3[2, 0]
            BTE = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KE3z * eps3[2, 1]
            CTE_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KE3z * eps3[2, 2])
            # cross products of the optic axis with the wavevector components
            DRE = c1[1] * KE1z - c1[2] * Ky
            ERE = c1[2] * Kx - c1[0] * KE1z
            FRE = c1[0] * Ky - c1[1] * Kx
            DTE = c3[1] * KE3z - c3[2] * Ky
            ETE = c3[2] * Kx - c3[0] * KE3z
            FTE = c3[0] * Ky - c3[1] * Kx
            # right-hand side: incident field, nonzero only at order 0
            b = S.r_[
                u[0] * dlt,
                u[1] * dlt,
                (k1[1] / k * u[2] - k1[2] / k * u[1]) * dlt,
                (k1[2] / k * u[0] - k1[0] / k * u[2]) * dlt,
            ]
            Ky_CRO_1 = ky / k * CRO_1
            Ky_CRE_1 = ky / k * CRE_1
            Kx_CRO_1 = kx[:, S.newaxis] / k * CRO_1
            Kx_CRE_1 = kx[:, S.newaxis] / k * CRE_1
            MR31 = -S.dot(Ky_CRO_1, ARO)
            MR32 = -S.dot(Ky_CRO_1, BRO) - KO1z
            MR33 = -S.dot(Ky_CRE_1, ARE)
            MR34 = -S.dot(Ky_CRE_1, BRE) - KE1z
            MR41 = S.dot(Kx_CRO_1, ARO) + KO1z
            MR42 = S.dot(Kx_CRO_1, BRO)
            MR43 = S.dot(Kx_CRE_1, ARE) + KE1z
            MR44 = S.dot(Kx_CRE_1, BRE)
            # reflection-side block matrix
            MR = S.asarray(
                S.bmat(
                    [
                        [I, ZERO, I, ZERO],
                        [ZERO, I, ZERO, I],
                        [MR31, MR32, MR33, MR34],
                        [MR41, MR42, MR43, MR44],
                    ]
                )
            )
            Ky_CTO_1 = ky / k * CTO_1
            Ky_CTE_1 = ky / k * CTE_1
            Kx_CTO_1 = kx[:, S.newaxis] / k * CTO_1
            Kx_CTE_1 = kx[:, S.newaxis] / k * CTE_1
            MT31 = -S.dot(Ky_CTO_1, ATO)
            MT32 = -S.dot(Ky_CTO_1, BTO) - KO3z
            MT33 = -S.dot(Ky_CTE_1, ATE)
            MT34 = -S.dot(Ky_CTE_1, BTE) - KE3z
            MT41 = S.dot(Kx_CTO_1, ATO) + KO3z
            MT42 = S.dot(Kx_CTO_1, BTO)
            MT43 = S.dot(Kx_CTE_1, ATE) + KE3z
            MT44 = S.dot(Kx_CTE_1, BTE)
            # transmission-side block matrix
            MT = S.asarray(
                S.bmat(
                    [
                        [I, ZERO, I, ZERO],
                        [ZERO, I, ZERO, I],
                        [MT31, MT32, MT33, MT34],
                        [MT41, MT42, MT43, MT44],
                    ]
                )
            )
            # reset the per-layer matrices for this wavelength
            Mp.fill(0.0)
            M.fill(0.0)
            for nlayer in range(nlayers - 2, 0, -1):  # internal layers
                layer = multilayer[nlayer]
                thickness = layer.thickness
                # Toeplitz matrices of the permittivity-tensor Fourier coefficients
                EPS2, EPS21 = layer.getEPSFourierCoeffs(wl, n, anisotropic=True)
                # Exx = S.squeeze(EPS2[0, 0, :])
                # Exx = toeplitz(S.flipud(Exx[0:hmax + 1]), Exx[hmax:])
                Exy = S.squeeze(EPS2[0, 1, :])
                Exy = toeplitz(S.flipud(Exy[0 : hmax + 1]), Exy[hmax:])
                Exz = S.squeeze(EPS2[0, 2, :])
                Exz = toeplitz(S.flipud(Exz[0 : hmax + 1]), Exz[hmax:])
                Eyx = S.squeeze(EPS2[1, 0, :])
                Eyx = toeplitz(S.flipud(Eyx[0 : hmax + 1]), Eyx[hmax:])
                Eyy = S.squeeze(EPS2[1, 1, :])
                Eyy = toeplitz(S.flipud(Eyy[0 : hmax + 1]), Eyy[hmax:])
                Eyz = S.squeeze(EPS2[1, 2, :])
                Eyz = toeplitz(S.flipud(Eyz[0 : hmax + 1]), Eyz[hmax:])
                Ezx = S.squeeze(EPS2[2, 0, :])
                Ezx = toeplitz(S.flipud(Ezx[0 : hmax + 1]), Ezx[hmax:])
                Ezy = S.squeeze(EPS2[2, 1, :])
                Ezy = toeplitz(S.flipud(Ezy[0 : hmax + 1]), Ezy[hmax:])
                Ezz = S.squeeze(EPS2[2, 2, :])
                Ezz = toeplitz(S.flipud(Ezz[0 : hmax + 1]), Ezz[hmax:])
                Exx_1 = S.squeeze(EPS21[0, 0, :])
                Exx_1 = toeplitz(S.flipud(Exx_1[0 : hmax + 1]), Exx_1[hmax:])
                Exx_1_1 = inv(Exx_1)
                # lalanne
                Ezz_1 = inv(Ezz)
                Ky_Ezz_1 = ky / k * Ezz_1
                Kx_Ezz_1 = kx[:, S.newaxis] / k * Ezz_1
                Exz_Ezz_1 = S.dot(Exz, Ezz_1)
                Eyz_Ezz_1 = S.dot(Eyz, Ezz_1)
                H11 = 1j * S.dot(Ky_Ezz_1, Ezy)
                H12 = 1j * S.dot(Ky_Ezz_1, Ezx)
                H13 = S.dot(Ky_Ezz_1, Kx)
                H14 = I - S.dot(Ky_Ezz_1, Ky)
                H21 = 1j * S.dot(Kx_Ezz_1, Ezy)
                H22 = 1j * S.dot(Kx_Ezz_1, Ezx)
                H23 = S.dot(Kx_Ezz_1, Kx) - I
                H24 = -S.dot(Kx_Ezz_1, Ky)
                H31 = S.dot(Kx, Ky) + Exy - S.dot(Exz_Ezz_1, Ezy)
                H32 = Exx_1_1 - S.dot(Ky, Ky) - S.dot(Exz_Ezz_1, Ezx)
                H33 = 1j * S.dot(Exz_Ezz_1, Kx)
                H34 = -1j * S.dot(Exz_Ezz_1, Ky)
                H41 = S.dot(Kx, Kx) - Eyy + S.dot(Eyz_Ezz_1, Ezy)
                H42 = -S.dot(Kx, Ky) - Eyx + S.dot(Eyz_Ezz_1, Ezx)
                H43 = -1j * S.dot(Eyz_Ezz_1, Kx)
                H44 = 1j * S.dot(Eyz_Ezz_1, Ky)
                # coupled-wave operator for this layer
                H = 1j * S.diag(S.repeat(S.diag(Kz), 4)) + S.asarray(
                    S.bmat(
                        [
                            [H11, H12, H13, H14],
                            [H21, H22, H23, H24],
                            [H31, H32, H33, H34],
                            [H41, H42, H43, H44],
                        ]
                    )
                )
                # eigen-decomposition of the coupled-wave operator:
                # q = propagation constants, W = mode field profiles
                q, W = eig(H)
                W1, W2, W3, W4 = S.split(W, 4)
                #
                # boundary conditions
                #
                # x = [R T]
                # R = [ROx ROy REx REy]
                # T = [TOx TOy TEx TEy]
                # b + MR.R = M1p.c
                # M1.c = M2p.c
                # ...
                # ML.c = MT.T
                # therefore: b + MR.R = (M1p.M1^-1.M2p.M2^-1. ...).MT.T
                # missing equations from (46)..(49) in glytsis_rigorous
                # [b] = [-MR Mtot.MT] [R]
                # [0]   [...........] [T]
                # NOTE(review): the mode exponentials below are referenced to
                # whichever layer face makes Re(k*q*z) <= 0, presumably to
                # avoid numerical overflow for evanescent modes -- confirm.
                z = S.zeros_like(q)
                z[S.where(q.real > 0)] = -thickness
                D = S.exp(k * q * z)
                Sy0 = W1 * D[S.newaxis, :]
                Sx0 = W2 * D[S.newaxis, :]
                Uy0 = W3 * D[S.newaxis, :]
                Ux0 = W4 * D[S.newaxis, :]
                z = thickness * S.ones_like(q)
                z[S.where(q.real > 0)] = 0
                D = S.exp(k * q * z)
                D1 = S.exp(-1j * k2i[2, :] * thickness)
                Syd = D1[:, S.newaxis] * W1 * D[S.newaxis, :]
                Sxd = D1[:, S.newaxis] * W2 * D[S.newaxis, :]
                Uyd = D1[:, S.newaxis] * W3 * D[S.newaxis, :]
                Uxd = D1[:, S.newaxis] * W4 * D[S.newaxis, :]
                Mp[:, :, nlayer] = S.r_[Sx0, Sy0, -1j * Ux0, -1j * Uy0]
                M[:, :, nlayer] = S.r_[Sxd, Syd, -1j * Uxd, -1j * Uyd]
            # chain the per-layer transfer matrices across the internal layers
            Mtot = S.eye(4 * nood, dtype=complex)
            for nlayer in range(1, nlayers - 1):
                Mtot = S.dot(S.dot(Mtot, Mp[:, :, nlayer]), inv(M[:, :, nlayer]))
            # full boundary-condition system: [BC_A1; BC_A2] . x = [b; 0]
            BC_b = S.r_[b, S.zeros_like(b)]
            BC_A1 = S.c_[-MR, S.dot(Mtot, MT)]
            BC_A2 = S.asarray(
                S.bmat(
                    [
                        [
                            (c1[0] * I - c1[2] * S.dot(CRO_1, ARO)),
                            (c1[1] * I - c1[2] * S.dot(CRO_1, BRO)),
                            ZERO,
                            ZERO,
                            ZERO,
                            ZERO,
                            ZERO,
                            ZERO,
                        ],
                        [
                            ZERO,
                            ZERO,
                            (DRE - S.dot(S.dot(FRE, CRE_1), ARE)),
                            (ERE - S.dot(S.dot(FRE, CRE_1), BRE)),
                            ZERO,
                            ZERO,
                            ZERO,
                            ZERO,
                        ],
                        [
                            ZERO,
                            ZERO,
                            ZERO,
                            ZERO,
                            (c3[0] * I - c3[2] * S.dot(CTO_1, ATO)),
                            (c3[1] * I - c3[2] * S.dot(CTO_1, BTO)),
                            ZERO,
                            ZERO,
                        ],
                        [
                            ZERO,
                            ZERO,
                            ZERO,
                            ZERO,
                            ZERO,
                            ZERO,
                            (DTE - S.dot(S.dot(FTE, CTE_1), ATE)),
                            (ETE - S.dot(S.dot(FTE, CTE_1), BTE)),
                        ],
                    ]
                )
            )
            BC_A = S.r_[BC_A1, BC_A2]
            x = linsolve(BC_A, BC_b)
            ROx, ROy, REx, REy, TOx, TOy, TEx, TEy = S.split(x, 8)
            # recover the z-components from the transversality conditions
            ROz = -S.dot(CRO_1, (S.dot(ARO, ROx) + S.dot(BRO, ROy)))
            REz = -S.dot(CRE_1, (S.dot(ARE, REx) + S.dot(BRE, REy)))
            TOz = -S.dot(CTO_1, (S.dot(ATO, TOx) + S.dot(BTO, TOy)))
            TEz = -S.dot(CTE_1, (S.dot(ATE, TEx) + S.dot(BTE, TEy)))
            # common normalization built from the incident wavevector and
            # polarization vector
            denom = (k1[2] - S.dot(u, k1) * u[2]).real
            DEO1[:, iwl] = (
                -(
                    (S.absolute(ROx) ** 2 + S.absolute(ROy) ** 2 + S.absolute(ROz) ** 2)
                    * S.conj(kO1i[2, :])
                    - (ROx * kO1i[0, :] + ROy * kO1i[1, :] + ROz * kO1i[2, :])
                    * S.conj(ROz)
                ).real
                / denom
            )
            DEE1[:, iwl] = (
                -(
                    (S.absolute(REx) ** 2 + S.absolute(REy) ** 2 + S.absolute(REz) ** 2)
                    * S.conj(kE1i[2, :])
                    - (REx * kE1i[0, :] + REy * kE1i[1, :] + REz * kE1i[2, :])
                    * S.conj(REz)
                ).real
                / denom
            )
            DEO3[:, iwl] = (
                (S.absolute(TOx) ** 2 + S.absolute(TOy) ** 2 + S.absolute(TOz) ** 2)
                * S.conj(kO3i[2, :])
                - (TOx * kO3i[0, :] + TOy * kO3i[1, :] + TOz * kO3i[2, :]) * S.conj(TOz)
            ).real / denom
            DEE3[:, iwl] = (
                (S.absolute(TEx) ** 2 + S.absolute(TEy) ** 2 + S.absolute(TEz) ** 2)
                * S.conj(kE3i[2, :])
                - (TEx * kE3i[0, :] + TEy * kE3i[1, :] + TEz * kE3i[2, :]) * S.conj(TEz)
            ).real / denom
        # save the results
        self.DEO1 = DEO1
        self.DEE1 = DEE1
        self.DEO3 = DEO3
        self.DEE3 = DEE3
        return self
    # def plot(self):
    #     """Plot the diffraction efficiencies."""
    #     g = Gnuplot.Gnuplot()
    #     g('set xlabel "$\lambda$"')
    #     g('set ylabel "diffraction efficiency"')
    #     g('set yrange [0:1]')
    #     g('set data style linespoints')
    #     g.plot(Gnuplot.Data(self.wls, self.DEO1[self.n,:], with_ = 'linespoints', title = 'DEO1'), \
    #            Gnuplot.Data(self.wls, self.DEO3[self.n,:], with_ = 'linespoints', title = 'DEO3'), \
    #            Gnuplot.Data(self.wls, self.DEE1[self.n,:], with_ = 'linespoints', title = 'DEE1'), \
    #            Gnuplot.Data(self.wls, self.DEE3[self.n,:], with_ = 'linespoints', title = 'DEE3'))
    #     raw_input('press enter to close the graph...')
    def __str__(self):
        """Return a human-readable summary of the solver configuration."""
        return (
            "ANISOTROPIC RCWA SOLVER\n\n%s\n\nLAMBDA = %g\nalpha = %g\ndelta = %g\npsi = %g\nphi = %g\nn = %d"
            % (
                self.multilayer.__str__(),
                self.LAMBDA,
                self.alpha,
                self.delta,
                self.psi,
                self.phi,
                self.n,
            )
        )
/GooseSLURM-0.12.4.tar.gz/GooseSLURM-0.12.4/docs/python.rst |
*************
Python module
*************
Overview
========
Write job scripts
-----------------
.. autosummary::
GooseSLURM.scripts.plain
GooseSLURM.scripts.tempdir
GooseSLURM.files.cmake
Parse ps
--------
.. autosummary::
GooseSLURM.ps.read_interpret
GooseSLURM.ps.read
GooseSLURM.ps.interpret
GooseSLURM.ps.colors
Parse squeue
------------
.. autosummary::
GooseSLURM.squeue.read_interpret
GooseSLURM.squeue.read
GooseSLURM.squeue.interpret
GooseSLURM.squeue.colors
Parse sinfo
-----------
.. autosummary::
GooseSLURM.sinfo.read_interpret
GooseSLURM.sinfo.read
GooseSLURM.sinfo.interpret
GooseSLURM.sinfo.colors
Rich strings
------------
.. autosummary::
GooseSLURM.rich.String
GooseSLURM.rich.Integer
GooseSLURM.rich.Float
GooseSLURM.rich.Duration
GooseSLURM.rich.Memory
Print
-----
.. autosummary::
GooseSLURM.table.print_long
GooseSLURM.table.print_columns
GooseSLURM.table.print_list
Duration
--------
.. autosummary::
GooseSLURM.duration.asSeconds
GooseSLURM.duration.asUnit
GooseSLURM.duration.asHuman
GooseSLURM.duration.asSlurm
Memory
------
.. autosummary::
GooseSLURM.memory.asBytes
GooseSLURM.memory.asUnit
GooseSLURM.memory.asHuman
GooseSLURM.memory.asSlurm
Documentation
=============
GooseSLURM.scripts
------------------
.. automodule:: GooseSLURM.scripts
:members:
GooseSLURM.files
----------------
.. automodule:: GooseSLURM.files
:members:
GooseSLURM.ps
-------------
.. automodule:: GooseSLURM.ps
:members:
GooseSLURM.squeue
-----------------
.. automodule:: GooseSLURM.squeue
:members:
GooseSLURM.sinfo
----------------
.. automodule:: GooseSLURM.sinfo
:members:
GooseSLURM.rich
---------------
.. automodule:: GooseSLURM.rich
:members:
GooseSLURM.table
----------------
.. automodule:: GooseSLURM.table
:members:
GooseSLURM.duration
-------------------
.. automodule:: GooseSLURM.duration
:members:
GooseSLURM.memory
-----------------
.. automodule:: GooseSLURM.memory
:members:
| PypiClean |
/ChemGAPP-0.0.9-py3-none-any.whl/ChemGAPP_Package/ChemGAPP_Big/Add_Gene_Names.py |
# In[ ]:
import argparse
import os
import pandas as pd
import re
def get_options(args=None):
    """Parse command-line options for the gene-name merging step.

    Parameters
    ----------
    args : list of str, optional
        Argument list to parse. When ``None`` (the default, and the
        behaviour of the original zero-argument call) argparse falls back
        to ``sys.argv[1:]``; passing an explicit list makes the parser
        usable from tests and other code.

    Returns
    -------
    argparse.Namespace
        Parsed options with attributes ``InputFile``, ``Outputpath``
        and ``PATH`` (each ``None`` when not supplied).
    """
    parser = argparse.ArgumentParser(description="Add the gene names from the plate info files to make the final dataset. The plate info files must be in a folder by themselves and should be .txt files.",
                                   formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-i", "--InputFile", help="The CSV output of S_Scores.py")
    # Typo fixed in the user-facing help text: "A path a prefix" -> "A path and prefix".
    parser.add_argument("-o", "--Outputpath", help="A path and prefix for the output files E.g ~/Desktop/ChemGAPP would make ChemGAPP_Final_Dataset.txt .")
    parser.add_argument("-p", "--PATH", help="The path to the folder containing the plate info files.")
    return parser.parse_args(args)
def main():
    """Merge gene names from plate-info files into the S-score dataset.

    Reads the CSV produced by S_Scores.py, pairs each source plate
    (first level of the CSV's column MultiIndex) with its plate-info
    ``.txt`` file (natural sort order: plate1, plate2, ..., plate10),
    then writes two tab-separated outputs:
    ``<Outputpath>_Final_Dataset.txt`` (one row per strain replicate) and
    ``<Outputpath>_Final_Dataset_Averaged.txt`` (scores averaged per gene).
    Returns the non-averaged DataFrame.
    """
    options = get_options()
    PATH = options.PATH
    inputfile = options.InputFile
    outputpath = options.Outputpath
    def plate_info(file):
        """Read one plate-info .txt file and index it by (row, column)."""
        p = pd.read_table(file)
        #renames the plate info file columns and adds row and column to index before sorting by index.
        if len(p.columns) == 3:
            p.columns = ['row','column','strain']
            p = p.set_index(['row','column'])
            p = p.sort_index()
        else:
            # NOTE(review): malformed files are only reported, then returned
            # unmodified -- downstream merge may fail; confirm intended.
            print("Plate information file" + str(file) + "not in correct format.")
        return p
    # assign directory
    directory = os.path.expanduser(PATH)
    # iterate over files in
    # that directory
    files = []
    for filename in os.listdir(directory):
        f = os.path.join(directory, filename)
        # checking if it is a file
        if f.endswith(".txt"):
            if os.path.isfile(f):
                #print(f)
                files.append(f)
    # looks for digits within the file names so that order of
    # plate info files is sorted plate1, plate2, plate3 etc. not plate1, plate10, plate2 etc.
    def atoi(text):
        # Convert digit runs to ints so numeric parts sort numerically.
        return int(text) if text.isdigit() else text
    def natural_keys(text):
        # Split into alternating text/number chunks for natural sorting.
        return [ atoi(c) for c in re.split(r'(\d+)', text) ]
    files.sort(key=natural_keys)
    plate_DFs = []
    for i in files:
        p = plate_info(i)
        plate_DFs.append(p)
    # S-score CSV: (row, column) row index and (plate, condition) column MultiIndex.
    nm3 = pd.read_csv(inputfile,index_col=[0, 1], header=[0, 1])
    plates = {x[0] for x in nm3.columns}
    # Shallow copy gains an extra ('1', 'strain') column so that 'strain'
    # is included when collecting the second-level column names below.
    nm4 = nm3.copy(deep=False)
    nm4['1','strain'] = 'p'
    columns1 = {x[1] for x in nm4.columns}
    #names columns with condition
    df_with_strains = pd.DataFrame(columns = sorted(columns1))
    # splits by each source plate and then assigns gene names from matching plate information file.
    # NOTE(review): pairing relies on sorted(plates) matching the natural
    # sort order of the plate-info files -- confirm naming convention.
    for a, n in zip(plate_DFs, sorted(plates)):
        df1 = (nm3.xs((n), axis =1, drop_level=True))
        df2 = pd.merge(df1,a,left_index=True, right_index=True)
        df_with_strains = pd.concat([df_with_strains , df2], ignore_index=True)
    df_with_strains = df_with_strains.rename(columns={'strain': 'Gene'})
    df_with_strains = df_with_strains.set_index('Gene')
    #averages scores for rows with the same gene name.
    groupmean = df_with_strains.groupby(level=0).mean()
    df_averaged = pd.DataFrame(groupmean, index=groupmean.index, columns=groupmean.columns)
    df_averaged.index.name = None
    outputfile = os.path.expanduser(outputpath)
    df_with_strains.to_csv(outputfile+"_Final_Dataset.txt", sep='\t')
    df_averaged.to_csv(outputfile+"_Final_Dataset_Averaged.txt", sep='\t')
    return df_with_strains
# Allow the module to be run directly as a command-line script.
if __name__ == "__main__":
    main()
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/datatables-searchbuilder/js/dataTables.searchBuilder.min.js | var $jscomp=$jscomp||{};$jscomp.scope={};$jscomp.ASSUME_ES5=!1;$jscomp.ASSUME_NO_NATIVE_MAP=!1;$jscomp.ASSUME_NO_NATIVE_SET=!1;$jscomp.SIMPLE_FROUND_POLYFILL=!1;$jscomp.ISOLATE_POLYFILLS=!1;$jscomp.defineProperty=$jscomp.ASSUME_ES5||"function"==typeof Object.defineProperties?Object.defineProperty:function(k,m,l){if(k==Array.prototype||k==Object.prototype)return k;k[m]=l.value;return k};
$jscomp.getGlobal=function(k){k=["object"==typeof globalThis&&globalThis,k,"object"==typeof window&&window,"object"==typeof self&&self,"object"==typeof global&&global];for(var m=0;m<k.length;++m){var l=k[m];if(l&&l.Math==Math)return l}throw Error("Cannot find global object");};$jscomp.global=$jscomp.getGlobal(this);$jscomp.IS_SYMBOL_NATIVE="function"===typeof Symbol&&"symbol"===typeof Symbol("x");$jscomp.TRUST_ES6_POLYFILLS=!$jscomp.ISOLATE_POLYFILLS||$jscomp.IS_SYMBOL_NATIVE;$jscomp.polyfills={};
$jscomp.propertyToPolyfillSymbol={};$jscomp.POLYFILL_PREFIX="$jscp$";var $jscomp$lookupPolyfilledValue=function(k,m){var l=$jscomp.propertyToPolyfillSymbol[m];if(null==l)return k[m];l=k[l];return void 0!==l?l:k[m]};$jscomp.polyfill=function(k,m,l,h){m&&($jscomp.ISOLATE_POLYFILLS?$jscomp.polyfillIsolated(k,m,l,h):$jscomp.polyfillUnisolated(k,m,l,h))};
$jscomp.polyfillUnisolated=function(k,m,l,h){l=$jscomp.global;k=k.split(".");for(h=0;h<k.length-1;h++){var p=k[h];if(!(p in l))return;l=l[p]}k=k[k.length-1];h=l[k];m=m(h);m!=h&&null!=m&&$jscomp.defineProperty(l,k,{configurable:!0,writable:!0,value:m})};
$jscomp.polyfillIsolated=function(k,m,l,h){var p=k.split(".");k=1===p.length;h=p[0];h=!k&&h in $jscomp.polyfills?$jscomp.polyfills:$jscomp.global;for(var t=0;t<p.length-1;t++){var v=p[t];if(!(v in h))return;h=h[v]}p=p[p.length-1];l=$jscomp.IS_SYMBOL_NATIVE&&"es6"===l?h[p]:null;m=m(l);null!=m&&(k?$jscomp.defineProperty($jscomp.polyfills,p,{configurable:!0,writable:!0,value:m}):m!==l&&($jscomp.propertyToPolyfillSymbol[p]=$jscomp.IS_SYMBOL_NATIVE?$jscomp.global.Symbol(p):$jscomp.POLYFILL_PREFIX+p,p=
$jscomp.propertyToPolyfillSymbol[p],$jscomp.defineProperty(h,p,{configurable:!0,writable:!0,value:m})))};$jscomp.polyfill("Object.is",function(k){return k?k:function(m,l){return m===l?0!==m||1/m===1/l:m!==m&&l!==l}},"es6","es3");$jscomp.polyfill("Array.prototype.includes",function(k){return k?k:function(m,l){var h=this;h instanceof String&&(h=String(h));var p=h.length;l=l||0;for(0>l&&(l=Math.max(l+p,0));l<p;l++){var t=h[l];if(t===m||Object.is(t,m))return!0}return!1}},"es7","es3");
$jscomp.checkStringArgs=function(k,m,l){if(null==k)throw new TypeError("The 'this' value for String.prototype."+l+" must not be null or undefined");if(m instanceof RegExp)throw new TypeError("First argument to String.prototype."+l+" must not be a regular expression");return k+""};$jscomp.polyfill("String.prototype.includes",function(k){return k?k:function(m,l){return-1!==$jscomp.checkStringArgs(this,m,"includes").indexOf(m,l||0)}},"es6","es3");
$jscomp.arrayIteratorImpl=function(k){var m=0;return function(){return m<k.length?{done:!1,value:k[m++]}:{done:!0}}};$jscomp.arrayIterator=function(k){return{next:$jscomp.arrayIteratorImpl(k)}};$jscomp.initSymbol=function(){};
$jscomp.polyfill("Symbol",function(k){if(k)return k;var m=function(p,t){this.$jscomp$symbol$id_=p;$jscomp.defineProperty(this,"description",{configurable:!0,writable:!0,value:t})};m.prototype.toString=function(){return this.$jscomp$symbol$id_};var l=0,h=function(p){if(this instanceof h)throw new TypeError("Symbol is not a constructor");return new m("jscomp_symbol_"+(p||"")+"_"+l++,p)};return h},"es6","es3");$jscomp.initSymbolIterator=function(){};
$jscomp.polyfill("Symbol.iterator",function(k){if(k)return k;k=Symbol("Symbol.iterator");for(var m="Array Int8Array Uint8Array Uint8ClampedArray Int16Array Uint16Array Int32Array Uint32Array Float32Array Float64Array".split(" "),l=0;l<m.length;l++){var h=$jscomp.global[m[l]];"function"===typeof h&&"function"!=typeof h.prototype[k]&&$jscomp.defineProperty(h.prototype,k,{configurable:!0,writable:!0,value:function(){return $jscomp.iteratorPrototype($jscomp.arrayIteratorImpl(this))}})}return k},"es6",
"es3");$jscomp.initSymbolAsyncIterator=function(){};$jscomp.iteratorPrototype=function(k){k={next:k};k[Symbol.iterator]=function(){return this};return k};$jscomp.iteratorFromArray=function(k,m){k instanceof String&&(k+="");var l=0,h={next:function(){if(l<k.length){var p=l++;return{value:m(p,k[p]),done:!1}}h.next=function(){return{done:!0,value:void 0}};return h.next()}};h[Symbol.iterator]=function(){return h};return h};
$jscomp.polyfill("Array.prototype.keys",function(k){return k?k:function(){return $jscomp.iteratorFromArray(this,function(m){return m})}},"es6","es3");$jscomp.polyfill("String.prototype.startsWith",function(k){return k?k:function(m,l){var h=$jscomp.checkStringArgs(this,m,"startsWith");m+="";var p=h.length,t=m.length;l=Math.max(0,Math.min(l|0,h.length));for(var v=0;v<t&&l<p;)if(h[l++]!=m[v++])return!1;return v>=t}},"es6","es3");
$jscomp.polyfill("String.prototype.endsWith",function(k){return k?k:function(m,l){var h=$jscomp.checkStringArgs(this,m,"endsWith");m+="";void 0===l&&(l=h.length);l=Math.max(0,Math.min(l|0,h.length));for(var p=m.length;0<p&&0<l;)if(h[--l]!=m[--p])return!1;return 0>=p}},"es6","es3");
(function(){function k(c){h=c;p=c.fn.dataTable}function m(c){B=c;E=c.fn.dataTable}function l(c){x=c;C=c.fn.DataTable}var h,p,t=window.moment,v=window.luxon,r=function(){function c(a,b,d,e,f){var g=this;void 0===e&&(e=0);void 0===f&&(f=1);if(!p||!p.versionCheck||!p.versionCheck("1.10.0"))throw Error("SearchPane requires DataTables 1.10 or newer");this.classes=h.extend(!0,{},c.classes);this.c=h.extend(!0,{},c.defaults,h.fn.dataTable.ext.searchBuilder,b);b=this.c.i18n;this.s={condition:void 0,conditions:{},
data:void 0,dataIdx:-1,dataPoints:[],dateFormat:!1,depth:f,dt:a,filled:!1,index:e,origData:void 0,topGroup:d,type:"",value:[]};this.dom={buttons:h("<div/>").addClass(this.classes.buttonContainer),condition:h("<select disabled/>").addClass(this.classes.condition).addClass(this.classes.dropDown).addClass(this.classes.italic).attr("autocomplete","hacking"),conditionTitle:h('<option value="" disabled selected hidden/>').html(this.s.dt.i18n("searchBuilder.condition",b.condition)),container:h("<div/>").addClass(this.classes.container),
data:h("<select/>").addClass(this.classes.data).addClass(this.classes.dropDown).addClass(this.classes.italic),dataTitle:h('<option value="" disabled selected hidden/>').html(this.s.dt.i18n("searchBuilder.data",b.data)),defaultValue:h("<select disabled/>").addClass(this.classes.value).addClass(this.classes.dropDown).addClass(this.classes.select).addClass(this.classes.italic),"delete":h("<button/>").html(this.s.dt.i18n("searchBuilder.delete",b["delete"])).addClass(this.classes["delete"]).addClass(this.classes.button).attr("title",
this.s.dt.i18n("searchBuilder.deleteTitle",b.deleteTitle)).attr("type","button"),left:h("<button/>").html(this.s.dt.i18n("searchBuilder.left",b.left)).addClass(this.classes.left).addClass(this.classes.button).attr("title",this.s.dt.i18n("searchBuilder.leftTitle",b.leftTitle)).attr("type","button"),right:h("<button/>").html(this.s.dt.i18n("searchBuilder.right",b.right)).addClass(this.classes.right).addClass(this.classes.button).attr("title",this.s.dt.i18n("searchBuilder.rightTitle",b.rightTitle)).attr("type",
"button"),value:[h("<select disabled/>").addClass(this.classes.value).addClass(this.classes.dropDown).addClass(this.classes.italic).addClass(this.classes.select)],valueTitle:h('<option value="--valueTitle--" disabled selected hidden/>').html(this.s.dt.i18n("searchBuilder.value",b.value))};if(this.c.greyscale)for(this.dom.data.addClass(this.classes.greyscale),this.dom.condition.addClass(this.classes.greyscale),this.dom.defaultValue.addClass(this.classes.greyscale),a=0,d=this.dom.value;a<d.length;a++)d[a].addClass(this.classes.greyscale);
this.s.dt.on("draw.dtsb",function(){g._adjustCriteria()});this.s.dt.on("buttons-action.dtsb",function(){g._adjustCriteria()});h(window).on("resize.dtsb",p.util.throttle(function(){g._adjustCriteria()}));this._buildCriteria();return this}c._escapeHTML=function(a){return a.toString().replace(/&/g,"&").replace(/</g,"<").replace(/>/g,">").replace(/"/g,'"')};c.prototype.updateArrows=function(a,b){void 0===a&&(a=!1);void 0===b&&(b=!0);this.dom.container.children().detach();this.dom.container.append(this.dom.data).append(this.dom.condition).append(this.dom.value[0]);
this.setListeners();void 0!==this.dom.value[0]&&this.dom.value[0].trigger("dtsb-inserted");for(var d=1;d<this.dom.value.length;d++)this.dom.container.append(this.dom.value[d]),this.dom.value[d].trigger("dtsb-inserted");1<this.s.depth&&this.dom.buttons.append(this.dom.left);(!1===this.c.depthLimit||this.s.depth<this.c.depthLimit)&&a?this.dom.buttons.append(this.dom.right):this.dom.right.remove();this.dom.buttons.append(this.dom["delete"]);this.dom.container.append(this.dom.buttons);b&&this._adjustCriteria()};
c.prototype.destroy=function(){this.dom.data.off(".dtsb");this.dom.condition.off(".dtsb");this.dom["delete"].off(".dtsb");for(var a=0,b=this.dom.value;a<b.length;a++)b[a].off(".dtsb");this.dom.container.remove()};c.prototype.search=function(a,b){var d=this.s.conditions[this.s.condition];if(void 0!==this.s.condition&&void 0!==d){var e=a[this.s.dataIdx];if(this.s.type.includes("num")&&(""!==this.s.dt.settings()[0].oLanguage.sDecimal||""!==this.s.dt.settings()[0].oLanguage.sThousands)){e=[a[this.s.dataIdx]];
""!==this.s.dt.settings()[0].oLanguage.sDecimal&&(e=a[this.s.dataIdx].split(this.s.dt.settings()[0].oLanguage.sDecimal));if(""!==this.s.dt.settings()[0].oLanguage.sThousands)for(a=0;a<e.length;a++)e[a]=e[a].replace(this.s.dt.settings()[0].oLanguage.sThousands,",");e=e.join(".")}"filter"!==this.c.orthogonal.search&&(e=this.s.dt.settings()[0],e=e.oApi._fnGetCellData(e,b,this.s.dataIdx,"string"===typeof this.c.orthogonal?this.c.orthogonal:this.c.orthogonal.search));if("array"===this.s.type)for(Array.isArray(e)||
(e=[e]),e.sort(),b=0,a=e;b<a.length;b++){var f=a[b];f&&"string"===typeof f&&f.replace(/[\r\n\u2028]/g," ")}else null!==e&&"string"===typeof e&&(e=e.replace(/[\r\n\u2028]/g," "));this.s.type.includes("html")&&"string"===typeof e&&(e=e.replace(/(<([^>]+)>)/ig,""));null===e&&(e="");return d.search(e,this.s.value,this)}};c.prototype.getDetails=function(a){void 0===a&&(a=!1);if(null!==this.s.type&&this.s.type.includes("num")&&(""!==this.s.dt.settings()[0].oLanguage.sDecimal||""!==this.s.dt.settings()[0].oLanguage.sThousands))for(a=
0;a<this.s.value.length;a++){var b=[this.s.value[a].toString()];""!==this.s.dt.settings()[0].oLanguage.sDecimal&&(b=this.s.value[a].split(this.s.dt.settings()[0].oLanguage.sDecimal));if(""!==this.s.dt.settings()[0].oLanguage.sThousands)for(var d=0;d<b.length;d++)b[d]=b[d].replace(this.s.dt.settings()[0].oLanguage.sThousands,",");this.s.value[a]=b.join(".")}else if(null!==this.s.type&&a)if(this.s.type.includes("date")||this.s.type.includes("time"))for(a=0;a<this.s.value.length;a++)null===this.s.value[a].match(/^\d{4}-([0]\d|1[0-2])-([0-2]\d|3[01])$/g)&&
(this.s.value[a]="");else if(this.s.type.includes("moment"))for(a=0;a<this.s.value.length;a++)this.s.value[a]=t(this.s.value[a],this.s.dateFormat).toISOString();else if(this.s.type.includes("luxon"))for(a=0;a<this.s.value.length;a++)this.s.value[a]=v.DateTime.fromFormat(this.s.value[a],this.s.dateFormat).toISO();if(this.s.type.includes("num")&&this.s.dt.page.info().serverSide)for(a=0;a<this.s.value.length;a++)this.s.value[a]=this.s.value[a].replace(/[^0-9.]/g,"");return{condition:this.s.condition,
data:this.s.data,origData:this.s.origData,type:this.s.type,value:this.s.value.map(function(e){return e.toString()})}};c.prototype.getNode=function(){return this.dom.container};c.prototype.populate=function(){this._populateData();-1!==this.s.dataIdx&&(this._populateCondition(),void 0!==this.s.condition&&this._populateValue())};c.prototype.rebuild=function(a){var b=!1,d;this._populateData();if(void 0!==a.data){var e=this.classes.italic,f=this.dom.data;this.dom.data.children("option").each(function(){!b&&
(h(this).text()===a.data||a.origData&&h(this).prop("origData")===a.origData)?(h(this).prop("selected",!0),f.removeClass(e),b=!0,d=h(this).val()):h(this).removeProp("selected")})}if(b){this.s.data=a.data;this.s.origData=a.origData;this.s.dataIdx=d;this.c.orthogonal=this._getOptions().orthogonal;this.dom.dataTitle.remove();this._populateCondition();this.dom.conditionTitle.remove();for(var g=void 0,n=this.dom.condition.children("option"),q=0;q<n.length;q++){var u=h(n[q]);void 0!==a.condition&&u.val()===
a.condition&&"string"===typeof a.condition?(u.prop("selected",!0),g=u.val()):u.removeProp("selected")}this.s.condition=g;if(void 0!==this.s.condition){this.dom.conditionTitle.removeProp("selected");this.dom.conditionTitle.remove();this.dom.condition.removeClass(this.classes.italic);for(q=0;q<n.length;q++)u=h(n[q]),u.val()!==this.s.condition&&u.removeProp("selected");this._populateValue(a)}else this.dom.conditionTitle.prependTo(this.dom.condition).prop("selected",!0)}};c.prototype.setListeners=function(){var a=
this;this.dom.data.unbind("change").on("change.dtsb",function(){a.dom.dataTitle.removeProp("selected");for(var b=a.dom.data.children("option."+a.classes.option),d=0;d<b.length;d++){var e=h(b[d]);e.val()===a.dom.data.val()?(a.dom.data.removeClass(a.classes.italic),e.prop("selected",!0),a.s.dataIdx=+e.val(),a.s.data=e.text(),a.s.origData=e.prop("origData"),a.c.orthogonal=a._getOptions().orthogonal,a._clearCondition(),a._clearValue(),a._populateCondition(),a.s.filled&&(a.s.filled=!1,a.s.dt.draw(),a.setListeners()),
a.s.dt.state.save()):e.removeProp("selected")}});this.dom.condition.unbind("change").on("change.dtsb",function(){a.dom.conditionTitle.removeProp("selected");for(var b=a.dom.condition.children("option."+a.classes.option),d=0;d<b.length;d++){var e=h(b[d]);if(e.val()===a.dom.condition.val()){a.dom.condition.removeClass(a.classes.italic);e.prop("selected",!0);e=e.val();for(var f=0,g=Object.keys(a.s.conditions);f<g.length;f++)if(g[f]===e){a.s.condition=e;break}a._clearValue();a._populateValue();e=0;for(f=
a.dom.value;e<f.length;e++)g=f[e],a.s.filled&&void 0!==g&&0!==a.dom.container.has(g[0]).length&&(a.s.filled=!1,a.s.dt.draw(),a.setListeners());(0===a.dom.value.length||1===a.dom.value.length&&void 0===a.dom.value[0])&&a.s.dt.draw()}else e.removeProp("selected")}})};c.prototype._adjustCriteria=function(){if(0!==h(document).has(this.dom.container).length){var a=this.dom.value[this.dom.value.length-1];if(void 0!==a&&0!==this.dom.container.has(a[0]).length){var b=a.outerWidth(!0);a=a.offset().left+b;
// Continuation of c.prototype._adjustCriteria: compares offsets of the left/right/
// delete buttons against the value elements; if there is less than 15px of slack
// (or the buttons wrapped onto another row), toggles the "vertical" layout class on
// the parent and triggers "dtsb-redrawContents" on the top group.
var d=this.dom.left.offset(),e=this.dom.right.offset(),f=this.dom["delete"].offset(),g=0!==this.dom.container.has(this.dom.left[0]).length,n=0!==this.dom.container.has(this.dom.right[0]).length,q=g?d.left:n?e.left:f.left;(15>q-a||g&&d.top!==f.top||n&&e.top!==f.top)&&!this.dom.container.parent().hasClass(this.classes.vertical)?(this.dom.container.parent().addClass(this.classes.vertical),this.s.topGroup.trigger("dtsb-redrawContents")):15<q-(this.dom.data.offset().left+this.dom.data.outerWidth(!0)+this.dom.condition.outerWidth(!0)+
// End of _adjustCriteria, then c.prototype._buildCriteria: assembles the criteria
// row DOM — data select + condition select + value element(s) + delete/right
// buttons — into this.dom.container, and wires the event listeners.
b)&&this.dom.container.parent().hasClass(this.classes.vertical)&&(this.dom.container.parent().removeClass(this.classes.vertical),this.s.topGroup.trigger("dtsb-redrawContents"))}}};c.prototype._buildCriteria=function(){this.dom.data.append(this.dom.dataTitle);this.dom.condition.append(this.dom.conditionTitle);this.dom.container.append(this.dom.data).append(this.dom.condition);for(var a=0,b=this.dom.value;a<b.length;a++){var d=b[a];d.append(this.dom.valueTitle);this.dom.container.append(d)}this.dom.container.append(this.dom["delete"]).append(this.dom.right);
// c.prototype._clearCondition: empties the condition <select>, re-shows the
// disabled placeholder option and resets s.conditions/s.condition. Then
// c.prototype._clearValue begins: when a condition is active, schedules removal of
// the old value elements (50ms setTimeout, presumably to let pending handlers
// finish — TODO confirm) ...
this.setListeners()};c.prototype._clearCondition=function(){this.dom.condition.empty();this.dom.conditionTitle.prop("selected",!0).attr("disabled","true");this.dom.condition.prepend(this.dom.conditionTitle).prop("selectedIndex",0);this.s.conditions={};this.s.condition=void 0};c.prototype._clearValue=function(){if(void 0!==this.s.condition){if(0<this.dom.value.length&&void 0!==this.dom.value[0])for(var a=function(f){void 0!==f&&setTimeout(function(){f.remove()},50)},b=0,d=this.dom.value;b<d.length;b++){var e=
// ... then re-creates value elements from the active condition's init() and
// inserts them after the condition select, firing "dtsb-inserted" on each. The
// else-branch (no condition) removes old elements and restores the value
// placeholder instead.
d[b];a(e)}this.dom.value=[].concat(this.s.conditions[this.s.condition].init(this,c.updateListener));if(0<this.dom.value.length&&void 0!==this.dom.value[0])for(this.dom.value[0].insertAfter(this.dom.condition).trigger("dtsb-inserted"),e=1;e<this.dom.value.length;e++)this.dom.value[e].insertAfter(this.dom.value[e-1]).trigger("dtsb-inserted")}else{a=function(f){void 0!==f&&setTimeout(function(){f.remove()},50)};b=0;for(d=this.dom.value;b<d.length;b++)e=d[b],a(e);this.dom.valueTitle.prop("selected",!0);
// End of _clearValue: resets s.value and replaces dom.value with a single disabled
// placeholder <select>. Then c.prototype._getOptions: deep-extends c.defaults with
// the current column's `searchBuilder` settings. Then c.prototype._populateCondition
// begins: builds the condition <option> list for the selected column.
this.dom.defaultValue.append(this.dom.valueTitle).insertAfter(this.dom.condition)}this.s.value=[];this.dom.value=[h("<select disabled/>").addClass(this.classes.value).addClass(this.classes.dropDown).addClass(this.classes.italic).addClass(this.classes.select).append(this.dom.valueTitle.clone())]};c.prototype._getOptions=function(){return h.extend(!0,{},c.defaults,this.s.dt.settings()[0].aoColumns[this.s.dataIdx].searchBuilder)};c.prototype._populateCondition=function(){var a=[],b=Object.keys(this.s.conditions).length;
// First-population path (no conditions cached yet): resolves the column's type in
// priority order — column.searchBuilderType, the detected DataTables type, column
// sType, and finally a forced re-detection via oApi._fnColumnTypes.
if(0===b){b=+this.dom.data.children("option:selected").val();this.s.type=this.s.dt.columns().type().toArray()[b];var d=this.s.dt.settings()[0].aoColumns;if(void 0!==d)if(d=d[b],void 0!==d.searchBuilderType&&null!==d.searchBuilderType)this.s.type=d.searchBuilderType;else if(void 0===this.s.type||null===this.s.type)this.s.type=d.sType;if(null===this.s.type||void 0===this.s.type)h.fn.dataTable.ext.oApi._fnColumnTypes(this.s.dt.settings()[0]),this.s.type=this.s.dt.columns().type().toArray()[b];this.dom.condition.removeAttr("disabled").empty().append(this.dom.conditionTitle).addClass(this.classes.italic);
// Strips a trailing locale decimal marker (oLanguage.sDecimal) from "num"/"num-fmt"
// type names, then selects the condition set: a per-type entry from c.conditions,
// else the moment/luxon date sets for moment-*/luxon-* types, else the string set.
this.dom.conditionTitle.prop("selected",!0);b=this.s.dt.settings()[0].oLanguage.sDecimal;""!==b&&this.s.type.indexOf(b)===this.s.type.length-b.length&&(this.s.type.includes("num-fmt")?this.s.type=this.s.type.replace(b,""):this.s.type.includes("num")&&(this.s.type=this.s.type.replace(b,"")));var e=void 0!==this.c.conditions[this.s.type]?this.c.conditions[this.s.type]:this.s.type.includes("moment")?this.c.conditions.moment:this.s.type.includes("luxon")?this.c.conditions.luxon:this.c.conditions.string;
// Extracts the date format from moment-/luxon- prefixed type names. For serverSide
// tables, select-based conditions are swapped to plain-input variants (initInput /
// inputValueInput / isInputValidInput) since option values can't be scanned from
// local data. conditionName may be an i18n callback.
this.s.type.includes("moment")?this.s.dateFormat=this.s.type.replace(/moment-/g,""):this.s.type.includes("luxon")&&(this.s.dateFormat=this.s.type.replace(/luxon-/g,""));for(var f=0,g=Object.keys(e);f<g.length;f++)d=g[f],null!==e[d]&&(this.s.dt.page.info().serverSide&&e[d].init===c.initSelect&&(e[d].init=c.initInput,e[d].inputValue=c.inputValueInput,e[d].isInputValid=c.isInputValidInput),this.s.conditions[d]=e[d],b=e[d].conditionName,"function"===typeof b&&(b=b(this.s.dt,this.c.i18n)),a.push(h("<option>",
// Re-population path (conditions already cached): rebuilds <option>s from
// this.s.conditions, re-selecting the previously stored condition.
{text:b,value:d}).addClass(this.classes.option).addClass(this.classes.notItalic)))}else if(0<b)for(this.dom.condition.empty().removeAttr("disabled").addClass(this.classes.italic),e=0,f=Object.keys(this.s.conditions);e<f.length;e++)d=f[e],b=this.s.conditions[d].conditionName,"function"===typeof b&&(b=b(this.s.dt,this.c.i18n)),d=h("<option>",{text:b,value:d}).addClass(this.classes.option).addClass(this.classes.notItalic),void 0!==this.s.condition&&this.s.condition===b&&(d.prop("selected",!0),this.dom.condition.removeClass(this.classes.italic)),
// End of _populateCondition: with zero conditions the select is disabled;
// otherwise the built options are appended and selectedIndex reset. Then
// c.prototype._populateData begins: fills the column <select>. First-run path
// (s.dataPoints empty): iterates table columns honoring the c.columns option.
a.push(d);else{this.dom.condition.attr("disabled","true").addClass(this.classes.italic);return}for(b=0;b<a.length;b++)this.dom.condition.append(a[b]);this.dom.condition.prop("selectedIndex",0)};c.prototype._populateData=function(){var a=this;this.dom.data.empty().append(this.dom.dataTitle);if(0===this.s.dataPoints.length)this.s.dt.columns().every(function(g){if(!0===a.c.columns||a.s.dt.columns(a.c.columns).indexes().toArray().includes(g)){for(var n=!1,q=0,u=a.s.dataPoints;q<u.length;q++)if(u[q].index===
// Builds a dataPoint {index, origData, text} per column (title preferred from
// searchBuilderTitle, HTML tags stripped via regex) and appends the <option>,
// re-selecting the one matching s.dataIdx.
g){n=!0;break}n||(n=a.s.dt.settings()[0].aoColumns[g],g={index:g,origData:n.data,text:(void 0===n.searchBuilderTitle?n.sTitle:n.searchBuilderTitle).replace(/(<([^>]+)>)/ig,"")},a.s.dataPoints.push(g),a.dom.data.append(h("<option>",{text:g.text,value:g.index}).addClass(a.classes.option).addClass(a.classes.notItalic).prop("origData",n.data).prop("selected",a.s.dataIdx===g.index?!0:!1)),a.s.dataIdx===g.index&&a.dom.dataTitle.removeProp("selected"))}});else for(var b=function(g){d.s.dt.columns().every(function(q){var u=
// Rebuild path (dataPoints restored, e.g. from saved state): re-resolves each
// saved dataPoint's column index/origData by matching the stripped title text
// against the current columns, then re-renders the options.
a.s.dt.settings()[0].aoColumns[q];(void 0===u.searchBuilderTitle?u.sTitle:u.searchBuilderTitle).replace(/(<([^>]+)>)/ig,"")===g.text&&(g.index=q,g.origData=u.data)});var n=h("<option>",{text:g.text.replace(/(<([^>]+)>)/ig,""),value:g.index}).addClass(d.classes.option).addClass(d.classes.notItalic).prop("origData",g.origData);d.s.data===g.text&&(d.s.dataIdx=g.index,d.dom.dataTitle.removeProp("selected"),n.prop("selected",!0),d.dom.data.removeClass(d.classes.italic));d.dom.data.append(n)},d=this,e=
// End of _populateData's rebuild loop, then c.prototype._populateValue(a):
// rebuilds the value input(s) for the active condition, optionally restoring a
// previously saved criteria `a` ({data, value} — presumably from saved state;
// TODO confirm). Old elements are removed on a 50ms delay; surplus container
// children between condition and buttons are pruned.
0,f=this.s.dataPoints;e<f.length;e++)b(f[e])};c.prototype._populateValue=function(a){var b=this,d=this.s.filled;this.s.filled=!1;setTimeout(function(){b.dom.defaultValue.remove()},50);for(var e=function(n){setTimeout(function(){void 0!==n&&n.remove()},50)},f=0,g=this.dom.value;f<g.length;f++)e(g[f]);e=this.dom.container.children();if(3<e.length)for(f=2;f<e.length-1;f++)h(e[f]).remove();void 0!==a&&this.s.dt.columns().every(function(n){b.s.dt.settings()[0].aoColumns[n].sTitle===a.data&&(b.s.dataIdx=
// Creates the new value elements via the condition's init(), inserts them after
// the condition select (firing "dtsb-inserted"), and recomputes s.filled with
// the condition's isInputValid().
n)});this.dom.value=[].concat(this.s.conditions[this.s.condition].init(this,c.updateListener,void 0!==a?a.value:void 0));void 0!==a&&void 0!==a.value&&(this.s.value=a.value);void 0!==this.dom.value[0]&&this.dom.value[0].insertAfter(this.dom.condition).trigger("dtsb-inserted");for(f=1;f<this.dom.value.length;f++)this.dom.value[f].insertAfter(this.dom.value[f-1]).trigger("dtsb-inserted");this.s.filled=this.s.conditions[this.s.condition].isInputValid(this.dom.value,this);this.setListeners();d!==this.s.filled&&
// End of _populateValue (redraw only when filled-state changed and not
// serverSide). Then c.prototype._throttle: trailing-edge debounce (default
// 200ms) used for input listeners. Then c.version = "1.1.0" and the start of
// the c.classes CSS class-name map.
(this.s.dt.page.info().serverSide||this.s.dt.draw(),this.setListeners())};c.prototype._throttle=function(a,b){void 0===b&&(b=200);var d=null,e=null,f=this;null===b&&(b=200);return function(){for(var g=[],n=0;n<arguments.length;n++)g[n]=arguments[n];n=+new Date;null!==d&&n<d+b?clearTimeout(e):d=n;e=setTimeout(function(){d=null;a.apply(f,g)},b)}};c.version="1.1.0";c.classes={button:"dtsb-button",buttonContainer:"dtsb-buttonContainer",condition:"dtsb-condition",container:"dtsb-criteria",data:"dtsb-data",
// Rest of c.classes (note: joiner is "dtsp-joiner", apparently a typo carried in
// the released build — do not "fix" here, CSS may rely on it). Then c.initSelect
// begins: static factory that builds a value <select> populated from the
// column's current cell contents; `e` selects array-flattening mode.
"delete":"dtsb-delete",dropDown:"dtsb-dropDown",greyscale:"dtsb-greyscale",input:"dtsb-input",italic:"dtsb-italic",joiner:"dtsp-joiner",left:"dtsb-left",notItalic:"dtsb-notItalic",option:"dtsb-option",right:"dtsb-right",select:"dtsb-select",value:"dtsb-value",vertical:"dtsb-vertical"};c.initSelect=function(a,b,d,e){void 0===d&&(d=null);void 0===e&&(e=!1);var f=a.dom.data.children("option:selected").val(),g=a.s.dt.rows().indexes().toArray(),n=a.s.dt.settings()[0];a.dom.valueTitle.prop("selected",!0);
// Continuation of c.initSelect: creates the <select>, fires the update listener
// `b` on change, then scans every row's cell for this column, reading both the
// filter-orthogonal value (A) and the display-orthogonal value (z);
// \r\n\u2028 line separators are normalized to spaces.
var q=h("<select/>").addClass(c.classes.value).addClass(c.classes.dropDown).addClass(c.classes.italic).addClass(c.classes.select).append(a.dom.valueTitle).on("change.dtsb",function(){h(this).removeClass(c.classes.italic);b(a,this)});a.c.greyscale&&q.addClass(c.classes.greyscale);for(var u=[],D=[],H=0;H<g.length;H++){var z=g[H],A=n.oApi._fnGetCellData(n,z,f,"string"===typeof a.c.orthogonal?a.c.orthogonal:a.c.orthogonal.search);A="string"===typeof A?A.replace(/[\r\n\u2028]/g," "):A;z=n.oApi._fnGetCellData(n,
// "array"-typed columns are coerced to arrays; J() builds one de-duplicated
// <option> per value (HTML stripped from the display text) and re-selects a
// previously saved value d[0] (array values compared via sort().join(",")).
z,f,"string"===typeof a.c.orthogonal?a.c.orthogonal:a.c.orthogonal.display);"array"===a.s.type&&(A=Array.isArray(A)?A:[A],z=Array.isArray(z)?z:[z]);var J=function(w,y){a.s.type.includes("html")&&null!==w&&"string"===typeof w&&w.replace(/(<([^>]+)>)/ig,"");w=h("<option>",{type:Array.isArray(w)?"Array":"String",value:w}).data("sbv",w).addClass(a.classes.option).addClass(a.classes.notItalic).html("string"===typeof y?y.replace(/(<([^>]+)>)/ig,""):y);y=w.val();-1===u.indexOf(y)&&(u.push(y),D.push(w),null!==
// Options are sorted by column type: lexicographic for array/string/html,
// numeric after tag-stripping for num/html-num ...
d&&Array.isArray(d[0])&&(d[0]=d[0].sort().join(",")),null!==d&&w.val()===d[0]&&(w.prop("selected",!0),q.removeClass(c.classes.italic),a.dom.valueTitle.removeProp("selected")))};if(e)for(var F=0;F<A.length;F++)J(A[F],z[F]);else J(A,Array.isArray(z)?z.join(", "):z)}D.sort(function(w,y){if("array"===a.s.type||"string"===a.s.type||"html"===a.s.type)return w.val()<y.val()?-1:w.val()>y.val()?1:0;if("num"===a.s.type||"html-num"===a.s.type)return+w.val().replace(/(<([^>]+)>)/ig,"")<+y.val().replace(/(<([^>]+)>)/ig,
// ... and numeric after stripping non [0-9.] chars for num-fmt/html-num-fmt.
// End of initSelect. Then c.initSelectArray (initSelect with array mode) and the
// start of c.initInput (plain text input using the table's searchDelay).
"")?-1:+w.val().replace(/(<([^>]+)>)/ig,"")>+y.val().replace(/(<([^>]+)>)/ig,"")?1:0;if("num-fmt"===a.s.type||"html-num-fmt"===a.s.type)return+w.val().replace(/[^0-9.]/g,"")<+y.val().replace(/[^0-9.]/g,"")?-1:+w.val().replace(/[^0-9.]/g,"")>+y.val().replace(/[^0-9.]/g,"")?1:0});for(e=0;e<D.length;e++)q.append(D[e]);return q};c.initSelectArray=function(a,b,d){void 0===d&&(d=null);return c.initSelect(a,b,d,!0)};c.initInput=function(a,b,d){void 0===d&&(d=null);var e=a.s.dt.settings()[0].searchDelay;
// Continuation of c.initInput: a throttled <input> whose listener `b` fires on
// every input — unless enterSearch / oInit.search["return"] is configured, in
// which case it only fires on Enter (keyCode 13). Restores saved value d[0]
// and hooks a one-shot "draw.dtsb" to trigger layout redraw logic.
e=h("<input/>").addClass(c.classes.value).addClass(c.classes.input).on("input.dtsb keypress.dtsb",a._throttle(function(f){f=f.keyCode||f.which;if(!(a.c.enterSearch||void 0!==a.s.dt.settings()[0].oInit.search&&a.s.dt.settings()[0].oInit.search["return"])||13===f)return b(a,this)},null===e?100:e));a.c.greyscale&&e.addClass(c.classes.greyscale);null!==d&&e.val(d[0]);a.s.dt.one("draw.dtsb",function(){a.s.topGroup.trigger("dtsb-redrawLogic")});return e};c.init2Input=function(a,b,d){void 0===d&&(d=null);
// c.init2Input: returns [input, joiner <span>, input] — two inputs separated by
// the i18n "searchBuilder.valueJoiner" label, for two-operand conditions
// (e.g. between). Same throttle/Enter handling as initInput.
var e=a.s.dt.settings()[0].searchDelay;e=[h("<input/>").addClass(c.classes.value).addClass(c.classes.input).on("input.dtsb keypress.dtsb",a._throttle(function(f){f=f.keyCode||f.which;if(!(a.c.enterSearch||void 0!==a.s.dt.settings()[0].oInit.search&&a.s.dt.settings()[0].oInit.search["return"])||13===f)return b(a,this)},null===e?100:e)),h("<span>").addClass(a.classes.joiner).html(a.s.dt.i18n("searchBuilder.valueJoiner",a.c.i18n.valueJoiner)),h("<input/>").addClass(c.classes.value).addClass(c.classes.input).on("input.dtsb keypress.dtsb",
// Saved values d[0]/d[1] go into the first/third elements (index 1 is the
// joiner). Then c.initDate begins: single dtDateTime picker input, using
// s.dateFormat when a moment/luxon format was detected.
a._throttle(function(f){f=f.keyCode||f.which;if(!(a.c.enterSearch||void 0!==a.s.dt.settings()[0].oInit.search&&a.s.dt.settings()[0].oInit.search["return"])||13===f)return b(a,this)},null===e?100:e))];a.c.greyscale&&(e[0].addClass(c.classes.greyscale),e[2].addClass(c.classes.greyscale));null!==d&&(e[0].val(d[0]),e[2].val(d[1]));a.s.dt.one("draw.dtsb",function(){a.s.topGroup.trigger("dtsb-redrawLogic")});return e};c.initDate=function(a,b,d){void 0===d&&(d=null);var e=a.s.dt.settings()[0].searchDelay,
// NOTE(review): in the enterSearch branch below, _throttle is invoked inside the
// keypress handler and its returned function is discarded — looks like the
// Enter-key callback never fires for initDate in this build; verify against the
// unminified SearchBuilder source before relying on it.
f=h("<input/>").addClass(c.classes.value).addClass(c.classes.input).dtDateTime({attachTo:"input",format:a.s.dateFormat?a.s.dateFormat:void 0}).on("change.dtsb",a._throttle(function(){return b(a,this)},null===e?100:e)).on("input.dtsb keypress.dtsb",a.c.enterSearch||void 0!==a.s.dt.settings()[0].oInit.search&&a.s.dt.settings()[0].oInit.search["return"]?function(g){a._throttle(function(){if(13===(g.keyCode||g.which))return b(a,this)},null===e?100:e)}:a._throttle(function(){return b(a,this)},null===e?
// End of c.initDate (greyscale class, restore d[0], one-shot redraw hook).
// Then c.initNoValue: for conditions needing no value input, only hooks the
// redraw logic. Then c.init2Date begins: two dtDateTime picker inputs for
// two-operand date conditions.
100:e));a.c.greyscale&&f.addClass(c.classes.greyscale);null!==d&&f.val(d[0]);a.s.dt.one("draw.dtsb",function(){a.s.topGroup.trigger("dtsb-redrawLogic")});return f};c.initNoValue=function(a){a.s.dt.one("draw.dtsb",function(){a.s.topGroup.trigger("dtsb-redrawLogic")})};c.init2Date=function(a,b,d){var e=this;void 0===d&&(d=null);var f=a.s.dt.settings()[0].searchDelay;f=[h("<input/>").addClass(c.classes.value).addClass(c.classes.input).dtDateTime({attachTo:"input",format:a.s.dateFormat?a.s.dateFormat:
// First picker: change events throttled via oApi._fnThrottle when searchDelay is
// set; input/keypress honors the enterSearch / search["return"] option (fires
// only on keyCode 13), otherwise fires immediately or throttled.
void 0}).on("change.dtsb",null!==f?a.s.dt.settings()[0].oApi._fnThrottle(function(){return b(a,this)},f):function(){b(a,e)}).on("input.dtsb keypress.dtsb",a.c.enterSearch||void 0!==a.s.dt.settings()[0].oInit.search&&a.s.dt.settings()[0].oInit.search["return"]||null===f?a.c.enterSearch||void 0!==a.s.dt.settings()[0].oInit.search&&a.s.dt.settings()[0].oInit.search["return"]?function(g){13===(g.keyCode||g.which)&&b(a,e)}:function(){b(a,e)}:a.s.dt.settings()[0].oApi._fnThrottle(function(){return b(a,
// Joiner <span> (i18n "searchBuilder.valueJoiner") and the second picker with
// identical event wiring.
this)},f)),h("<span>").addClass(a.classes.joiner).html(a.s.dt.i18n("searchBuilder.valueJoiner",a.c.i18n.valueJoiner)),h("<input/>").addClass(c.classes.value).addClass(c.classes.input).dtDateTime({attachTo:"input",format:a.s.dateFormat?a.s.dateFormat:void 0}).on("change.dtsb",null!==f?a.s.dt.settings()[0].oApi._fnThrottle(function(){return b(a,this)},f):function(){b(a,e)}).on("input.dtsb keypress.dtsb",a.c.enterSearch||void 0!==a.s.dt.settings()[0].oInit.search&&a.s.dt.settings()[0].oInit.search["return"]||
// End of init2Date: greyscale classes, restore saved pair d[0]/d[1], one-shot
// redraw hook. The line ends with the start of c.isInputValidSelect (defined on
// the next line).
null===f?a.c.enterSearch||void 0!==a.s.dt.settings()[0].oInit.search&&a.s.dt.settings()[0].oInit.search["return"]?function(g){13===(g.keyCode||g.which)&&b(a,e)}:function(){b(a,e)}:a.s.dt.settings()[0].oApi._fnThrottle(function(){return b(a,this)},f))];a.c.greyscale&&(f[0].addClass(c.classes.greyscale),f[2].addClass(c.classes.greyscale));null!==d&&0<d.length&&(f[0].val(d[0]),f[2].val(d[1]));a.s.dt.one("draw.dtsb",function(){a.s.topGroup.trigger("dtsb-redrawLogic")});return f};c.isInputValidSelect=
// Body of c.isInputValidSelect (assignment started at the end of the previous line):
// a select is considered "unfilled" while only its single placeholder (first,
// non-italic) option is selected.
function(a){for(var b=!0,d=0;d<a.length;d++){var e=a[d];e.children("option:selected").length===e.children("option").length-e.children("option."+c.classes.notItalic).length&&1===e.children("option:selected").length&&e.children("option:selected")[0]===e.children("option")[0]&&(b=!1)}return b};c.isInputValidInput=function(a){for(var b=!0,d=0;d<a.length;d++){var e=a[d];e.is("input")&&0===e.val().length&&(b=!1)}return b};c.inputValueSelect=function(a){for(var b=[],d=0;d<a.length;d++){var e=a[d];e.is("select")&&
// c.inputValueSelect / c.inputValueInput HTML-escape the raw values via c._escapeHTML.
// c.updateListener: re-validates and re-reads the inputs for the active condition,
// then (for "num"-typed columns) normalises each value through the table's
// oLanguage.sDecimal / sThousands settings so it compares as a plain "1234.5" string.
b.push(c._escapeHTML(e.children("option:selected").data("sbv")))}return b};c.inputValueInput=function(a){for(var b=[],d=0;d<a.length;d++){var e=a[d];e.is("input")&&b.push(c._escapeHTML(e.val()))}return b};c.updateListener=function(a,b){var d=a.s.conditions[a.s.condition];a.s.filled=d.isInputValid(a.dom.value,a);a.s.value=d.inputValue(a.dom.value,a);if(a.s.filled){Array.isArray(a.s.value)||(a.s.value=[a.s.value]);for(d=0;d<a.s.value.length;d++)if(Array.isArray(a.s.value[d]))a.s.value[d].sort();else if(a.s.type.includes("num")&&
(""!==a.s.dt.settings()[0].oLanguage.sDecimal||""!==a.s.dt.settings()[0].oLanguage.sThousands)){var e=[a.s.value[d].toString()];""!==a.s.dt.settings()[0].oLanguage.sDecimal&&(e=a.s.value[d].split(a.s.dt.settings()[0].oLanguage.sDecimal));if(""!==a.s.dt.settings()[0].oLanguage.sThousands)for(var f=0;f<e.length;f++)e[f]=e[f].replace(a.s.dt.settings()[0].oLanguage.sThousands,",");a.s.value[d]=e.join(".")}f=e=null;for(d=0;d<a.dom.value.length;d++)b===a.dom.value[d][0]&&(e=d,void 0!==b.selectionStart&&
// End of updateListener: redraw the table, then restore focus and caret position on
// the input that triggered the update. c.dateConditions (plain string dates) begins
// on this line: comparisons normalise "/", "-" and "," separators to "-" before
// comparing the strings directly.
(f=b.selectionStart));a.s.dt.draw();null!==e&&(a.dom.value[e].removeClass(a.classes.italic),a.dom.value[e].focus(),null!==f&&a.dom.value[e][0].setSelectionRange(f,f))}else a.s.dt.draw()};c.dateConditions={"=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.equals",b.conditions.date.equals)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=a.replace(/(\/|-|,)/g,"-");return a===b[0]}},"!=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.not",
// Remaining c.dateConditions entries: <, >, between, !between, null, !null.
// between/!between accept operands in either order (the b[0]<b[1] check);
// null/!null take no operand (initNoValue) and treat null/undefined/"" as empty.
b.conditions.date.not)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=a.replace(/(\/|-|,)/g,"-");return a!==b[0]}},"<":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.before",b.conditions.date.before)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=a.replace(/(\/|-|,)/g,"-");return a<b[0]}},">":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.after",
b.conditions.date.after)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=a.replace(/(\/|-|,)/g,"-");return a>b[0]}},between:{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.between",b.conditions.date.between)},init:c.init2Date,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=a.replace(/(\/|-|,)/g,"-");return b[0]<b[1]?b[0]<=a&&a<=b[1]:b[1]<=a&&a<=b[0]}},"!between":{conditionName:function(a,
b){return a.i18n("searchBuilder.conditions.date.notBetween",b.conditions.date.notBetween)},init:c.init2Date,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=a.replace(/(\/|-|,)/g,"-");return b[0]<b[1]?!(b[0]<=a&&a<=b[1]):!(b[1]<=a&&a<=b[0])}},"null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.empty",b.conditions.date.empty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return null===
// c.momentDateConditions begins on this line: the same operator set, but values are
// parsed with `t` (presumably Moment.js -- confirm) using d.s.dateFormat and compared
// via .valueOf() epoch timestamps.
a||void 0===a||0===a.length}},"!null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.notEmpty",b.conditions.date.notEmpty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return!(null===a||void 0===a||0===a.length)}}};c.momentDateConditions={"=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.equals",b.conditions.date.equals)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,
search:function(a,b,d){return t(a,d.s.dateFormat).valueOf()===t(b[0],d.s.dateFormat).valueOf()}},"!=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.not",b.conditions.date.not)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){return t(a,d.s.dateFormat).valueOf()!==t(b[0],d.s.dateFormat).valueOf()}},"<":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.before",b.conditions.date.before)},init:c.initDate,
inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){return t(a,d.s.dateFormat).valueOf()<t(b[0],d.s.dateFormat).valueOf()}},">":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.after",b.conditions.date.after)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){return t(a,d.s.dateFormat).valueOf()>t(b[0],d.s.dateFormat).valueOf()}},between:{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.between",
b.conditions.date.between)},init:c.init2Date,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){a=t(a,d.s.dateFormat).valueOf();var e=t(b[0],d.s.dateFormat).valueOf();b=t(b[1],d.s.dateFormat).valueOf();return e<b?e<=a&&a<=b:b<=a&&a<=e}},"!between":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.notBetween",b.conditions.date.notBetween)},init:c.init2Date,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){a=
t(a,d.s.dateFormat).valueOf();var e=t(b[0],d.s.dateFormat).valueOf();b=t(b[1],d.s.dateFormat).valueOf();return e<b?!(+e<=+a&&+a<=+b):!(+b<=+a&&+a<=+e)}},"null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.empty",b.conditions.date.empty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return null===a||void 0===a||0===a.length}},"!null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.notEmpty",b.conditions.date.notEmpty)},
// c.luxonDateConditions begins on this line: identical operator logic, parsing with
// v.DateTime.fromFormat(...) (presumably Luxon -- confirm) and comparing .ts timestamps.
init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return!(null===a||void 0===a||0===a.length)}}};c.luxonDateConditions={"=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.equals",b.conditions.date.equals)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){return v.DateTime.fromFormat(a,d.s.dateFormat).ts===v.DateTime.fromFormat(b[0],d.s.dateFormat).ts}},"!=":{conditionName:function(a,
// Remaining c.luxonDateConditions entries: !=, <, >, between, !between, null, !null --
// structurally identical to the moment table above, via DateTime.fromFormat(...).ts.
b){return a.i18n("searchBuilder.conditions.date.not",b.conditions.date.not)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){return v.DateTime.fromFormat(a,d.s.dateFormat).ts!==v.DateTime.fromFormat(b[0],d.s.dateFormat).ts}},"<":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.before",b.conditions.date.before)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){return v.DateTime.fromFormat(a,
d.s.dateFormat).ts<v.DateTime.fromFormat(b[0],d.s.dateFormat).ts}},">":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.after",b.conditions.date.after)},init:c.initDate,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){return v.DateTime.fromFormat(a,d.s.dateFormat).ts>v.DateTime.fromFormat(b[0],d.s.dateFormat).ts}},between:{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.between",b.conditions.date.between)},init:c.init2Date,
inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){a=v.DateTime.fromFormat(a,d.s.dateFormat).ts;var e=v.DateTime.fromFormat(b[0],d.s.dateFormat).ts;b=v.DateTime.fromFormat(b[1],d.s.dateFormat).ts;return e<b?e<=a&&a<=b:b<=a&&a<=e}},"!between":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.notBetween",b.conditions.date.notBetween)},init:c.init2Date,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b,d){a=v.DateTime.fromFormat(a,
d.s.dateFormat).ts;var e=v.DateTime.fromFormat(b[0],d.s.dateFormat).ts;b=v.DateTime.fromFormat(b[1],d.s.dateFormat).ts;return e<b?!(+e<=+a&&+a<=+b):!(+b<=+a&&+a<=+e)}},"null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.empty",b.conditions.date.empty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return null===a||void 0===a||0===a.length}},"!null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.date.notEmpty",
// c.numConditions begins on this line: "="/"!=" use select-based inputs
// (initSelect/inputValueSelect); values are coerced numerically with unary +.
b.conditions.date.notEmpty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return!(null===a||void 0===a||0===a.length)}}};c.numConditions={"=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.equals",b.conditions.number.equals)},init:c.initSelect,inputValue:c.inputValueSelect,isInputValid:c.isInputValidSelect,search:function(a,b){return+a===+b[0]}},"!=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.not",
// c.numConditions continued: relational operators use free-text inputs (initInput) and
// compare with unary + coercion; between/!between accept operands in either order.
b.conditions.number.not)},init:c.initSelect,inputValue:c.inputValueSelect,isInputValid:c.isInputValidSelect,search:function(a,b){return+a!==+b[0]}},"<":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.lt",b.conditions.number.lt)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return+a<+b[0]}},"<=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.lte",b.conditions.number.lte)},init:c.initInput,
inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return+a<=+b[0]}},">=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.gte",b.conditions.number.gte)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return+a>=+b[0]}},">":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.gt",b.conditions.number.gt)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,
search:function(a,b){return+a>+b[0]}},between:{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.between",b.conditions.number.between)},init:c.init2Input,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return+b[0]<+b[1]?+b[0]<=+a&&+a<=+b[1]:+b[1]<=+a&&+a<=+b[0]}},"!between":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.notBetween",b.conditions.number.notBetween)},init:c.init2Input,inputValue:c.inputValueInput,
isInputValid:c.isInputValidInput,search:function(a,b){return+b[0]<+b[1]?!(+b[0]<=+a&&+a<=+b[1]):!(+b[1]<=+a&&+a<=+b[0])}},"null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.empty",b.conditions.number.empty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return null===a||void 0===a||0===a.length}},"!null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.notEmpty",b.conditions.number.notEmpty)},
// c.numFmtConditions begins mid-line: same numeric operators, but both the cell value
// and the operand are first stripped to digits and "." (preserving a leading "-") so
// formatted numbers such as "$1,234.00" compare numerically.
init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return!(null===a||void 0===a||0===a.length)}}};c.numFmtConditions={"=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.equals",b.conditions.number.equals)},init:c.initSelect,inputValue:c.inputValueSelect,isInputValid:c.isInputValidSelect,search:function(a,b){a=0===a.indexOf("-")?"-"+a.replace(/[^0-9.]/g,""):a.replace(/[^0-9.]/g,"");b=0===b[0].indexOf("-")?"-"+b[0].replace(/[^0-9.]/g,
""):b[0].replace(/[^0-9.]/g,"");return+a===+b}},"!=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.not",b.conditions.number.not)},init:c.initSelect,inputValue:c.inputValueSelect,isInputValid:c.isInputValidSelect,search:function(a,b){a=0===a.indexOf("-")?"-"+a.replace(/[^0-9.]/g,""):a.replace(/[^0-9.]/g,"");b=0===b[0].indexOf("-")?"-"+b[0].replace(/[^0-9.]/g,""):b[0].replace(/[^0-9.]/g,"");return+a!==+b}},"<":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.lt",
b.conditions.number.lt)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=0===a.indexOf("-")?"-"+a.replace(/[^0-9.]/g,""):a.replace(/[^0-9.]/g,"");b=0===b[0].indexOf("-")?"-"+b[0].replace(/[^0-9.]/g,""):b[0].replace(/[^0-9.]/g,"");return+a<+b}},"<=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.lte",b.conditions.number.lte)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,
b){a=0===a.indexOf("-")?"-"+a.replace(/[^0-9.]/g,""):a.replace(/[^0-9.]/g,"");b=0===b[0].indexOf("-")?"-"+b[0].replace(/[^0-9.]/g,""):b[0].replace(/[^0-9.]/g,"");return+a<=+b}},">=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.gte",b.conditions.number.gte)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=0===a.indexOf("-")?"-"+a.replace(/[^0-9.]/g,""):a.replace(/[^0-9.]/g,"");b=0===b[0].indexOf("-")?"-"+b[0].replace(/[^0-9.]/g,
""):b[0].replace(/[^0-9.]/g,"");return+a>=+b}},">":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.gt",b.conditions.number.gt)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=0===a.indexOf("-")?"-"+a.replace(/[^0-9.]/g,""):a.replace(/[^0-9.]/g,"");b=0===b[0].indexOf("-")?"-"+b[0].replace(/[^0-9.]/g,""):b[0].replace(/[^0-9.]/g,"");return+a>+b}},between:{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.between",
b.conditions.number.between)},init:c.init2Input,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=0===a.indexOf("-")?"-"+a.replace(/[^0-9.]/g,""):a.replace(/[^0-9.]/g,"");var d=0===b[0].indexOf("-")?"-"+b[0].replace(/[^0-9.]/g,""):b[0].replace(/[^0-9.]/g,"");b=0===b[1].indexOf("-")?"-"+b[1].replace(/[^0-9.]/g,""):b[1].replace(/[^0-9.]/g,"");return+d<+b?+d<=+a&&+a<=+b:+b<=+a&&+a<=+d}},"!between":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.notBetween",
b.conditions.number.notBetween)},init:c.init2Input,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){a=0===a.indexOf("-")?"-"+a.replace(/[^0-9.]/g,""):a.replace(/[^0-9.]/g,"");var d=0===b[0].indexOf("-")?"-"+b[0].replace(/[^0-9.]/g,""):b[0].replace(/[^0-9.]/g,"");b=0===b[1].indexOf("-")?"-"+b[1].replace(/[^0-9.]/g,""):b[1].replace(/[^0-9.]/g,"");return+d<+b?!(+d<=+a&&+a<=+b):!(+b<=+a&&+a<=+d)}},"null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.empty",
// c.stringConditions begins at the end of this line.
b.conditions.number.empty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return null===a||void 0===a||0===a.length}},"!null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.number.notEmpty",b.conditions.number.notEmpty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return!(null===a||void 0===a||0===a.length)}}};c.stringConditions={"=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.string.equals",
// c.stringConditions continued: "="/"!=" are exact (case-sensitive) matches on select
// inputs; starts/contains/ends and their negations are case-insensitive free-text.
// NOTE(review): "!=" pairs init:c.initSelect with isInputValid:c.isInputValidInput,
// while "=" uses isInputValidSelect -- looks inconsistent in the generated source;
// verify against the upstream TypeScript before "fixing" it here.
b.conditions.string.equals)},init:c.initSelect,inputValue:c.inputValueSelect,isInputValid:c.isInputValidSelect,search:function(a,b){return a===b[0]}},"!=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.string.not",b.conditions.string.not)},init:c.initSelect,inputValue:c.inputValueSelect,isInputValid:c.isInputValidInput,search:function(a,b){return a!==b[0]}},starts:{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.string.startsWith",b.conditions.string.startsWith)},
init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return 0===a.toLowerCase().indexOf(b[0].toLowerCase())}},"!starts":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.string.notStartsWith",b.conditions.string.notStartsWith)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return 0!==a.toLowerCase().indexOf(b[0].toLowerCase())}},contains:{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.string.contains",
b.conditions.string.contains)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return a.toLowerCase().includes(b[0].toLowerCase())}},"!contains":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.string.notContains",b.conditions.string.notContains)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return!a.toLowerCase().includes(b[0].toLowerCase())}},ends:{conditionName:function(a,
b){return a.i18n("searchBuilder.conditions.string.endsWith",b.conditions.string.endsWith)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return a.toLowerCase().endsWith(b[0].toLowerCase())}},"!ends":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.string.notEndsWith",b.conditions.string.notEndsWith)},init:c.initInput,inputValue:c.inputValueInput,isInputValid:c.isInputValidInput,search:function(a,b){return!a.toLowerCase().endsWith(b[0].toLowerCase())}},
"null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.string.empty",b.conditions.string.empty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return null===a||void 0===a||0===a.length}},"!null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.string.notEmpty",b.conditions.string.notEmpty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return!(null===a||void 0===
// c.arrayConditions begins on this line: contains/without test membership of the
// operand in the cell's array; "="/"!=" compare arrays element-by-element in order.
a||0===a.length)}}};c.arrayConditions={contains:{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.array.contains",b.conditions.array.contains)},init:c.initSelectArray,inputValue:c.inputValueSelect,isInputValid:c.isInputValidSelect,search:function(a,b){return a.includes(b[0])}},without:{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.array.without",b.conditions.array.without)},init:c.initSelectArray,inputValue:c.inputValueSelect,isInputValid:c.isInputValidSelect,
search:function(a,b){return-1===a.indexOf(b[0])}},"=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.array.equals",b.conditions.array.equals)},init:c.initSelect,inputValue:c.inputValueSelect,isInputValid:c.isInputValidSelect,search:function(a,b){if(a.length===b[0].length){for(var d=0;d<a.length;d++)if(a[d]!==b[0][d])return!1;return!0}return!1}},"!=":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.array.not",b.conditions.array.not)},init:c.initSelect,inputValue:c.inputValueSelect,
isInputValid:c.isInputValidSelect,search:function(a,b){if(a.length===b[0].length){for(var d=0;d<a.length;d++)if(a[d]!==b[0][d])return!0;return!1}return!0}},"null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.array.empty",b.conditions.array.empty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return null===a||void 0===a||0===a.length}},"!null":{conditionName:function(a,b){return a.i18n("searchBuilder.conditions.array.notEmpty",
// c.defaults maps DataTables column types (date, num, num-fmt, html, string, array,
// moment, luxon and html-* variants) onto the condition tables defined above.
b.conditions.array.notEmpty)},init:c.initNoValue,inputValue:function(){},isInputValid:function(){return!0},search:function(a){return null!==a&&void 0!==a&&0!==a.length}}};c.defaults={columns:!0,conditions:{array:c.arrayConditions,date:c.dateConditions,html:c.stringConditions,"html-num":c.numConditions,"html-num-fmt":c.numFmtConditions,luxon:c.luxonDateConditions,moment:c.momentDateConditions,num:c.numConditions,"num-fmt":c.numFmtConditions,string:c.stringConditions},depthLimit:!1,enterSearch:!1,filterChanged:void 0,
// Default i18n strings; the Criteria IIFE closes here (`return c}()`) and the Group
// class IIFE (`G=function(){...`) opens at the end of this line.
greyscale:!1,i18n:{add:"Add Condition",button:{0:"Search Builder",_:"Search Builder (%d)"},clearAll:"Clear All",condition:"Condition",data:"Data","delete":"×",deleteTitle:"Delete filtering rule",left:"<",leftTitle:"Outdent criteria",logicAnd:"And",logicOr:"Or",right:">",rightTitle:"Indent criteria",title:{0:"Custom Search Builder",_:"Custom Search Builder (%d)"},value:"Value",valueJoiner:"and"},logic:"AND",orthogonal:{display:"display",search:"filter"},preDefined:!1};return c}(),B,E,G=function(){function c(a,
// Group class constructor body (signature opened on the previous line). From the state
// object below: a=DataTables api, b=options, d=topGroup, e=index (default 0),
// f=isChild (default false), g=depth (default 1). Throws unless DataTables >= 1.10.
// Builds the add/clear/logic buttons and the group container element.
b,d,e,f,g){void 0===e&&(e=0);void 0===f&&(f=!1);void 0===g&&(g=1);if(!E||!E.versionCheck||!E.versionCheck("1.10.0"))throw Error("SearchBuilder requires DataTables 1.10 or newer");this.classes=B.extend(!0,{},c.classes);this.c=B.extend(!0,{},c.defaults,b);this.s={criteria:[],depth:g,dt:a,index:e,isChild:f,logic:void 0,opts:b,preventRedraw:!1,toDrop:void 0,topGroup:d};this.dom={add:B("<button/>").addClass(this.classes.add).addClass(this.classes.button).attr("type","button"),clear:B("<button>×</button>").addClass(this.classes.button).addClass(this.classes.clearGroup).attr("type",
"button"),container:B("<div/>").addClass(this.classes.group),logic:B("<button><div/></button>").addClass(this.classes.logic).addClass(this.classes.button).attr("type","button"),logicContainer:B("<div/>").addClass(this.classes.logicContainer)};void 0===this.s.topGroup&&(this.s.topGroup=this.dom.container);this._setup();return this}c.prototype.destroy=function(){this.dom.add.off(".dtsb");this.dom.logic.off(".dtsb");this.dom.container.trigger("dtsb-destroy").remove();this.s.criteria=[]};c.prototype.getDetails=
// getDetails serialises the criteria tree; getNode exposes the container element;
// rebuild restores saved state, re-creating nested groups (entries carrying a .logic)
// and plain criteria (entries without one).
function(a){void 0===a&&(a=!1);if(0===this.s.criteria.length)return{};for(var b={criteria:[],logic:this.s.logic},d=0,e=this.s.criteria;d<e.length;d++)b.criteria.push(e[d].criteria.getDetails(a));return b};c.prototype.getNode=function(){return this.dom.container};c.prototype.rebuild=function(a){if(!(void 0===a.criteria||null===a.criteria||Array.isArray(a.criteria)&&0===a.criteria.length)){this.s.logic=a.logic;this.dom.logic.children().first().html("OR"===this.s.logic?this.s.dt.i18n("searchBuilder.logicOr",
this.c.i18n.logicOr):this.s.dt.i18n("searchBuilder.logicAnd",this.c.i18n.logicAnd));if(Array.isArray(a.criteria))for(var b=0,d=a.criteria;b<d.length;b++)a=d[b],void 0!==a.logic?this._addPrevGroup(a):void 0===a.logic&&this._addPrevCriteria(a);b=0;for(d=this.s.criteria;b<d.length;b++)a=d[b],a.criteria instanceof r&&(a.criteria.updateArrows(1<this.s.criteria.length,!1),this._setCriteriaListeners(a.criteria))}};c.prototype.redrawContents=function(){if(!this.s.preventRedraw){this.dom.container.children().detach();
// redrawContents: detach children, re-append sorted by criterion index, re-attach
// listeners and prune empty sub-groups; redrawLogic recurses down the tree.
this.dom.container.append(this.dom.logicContainer).append(this.dom.add);this.s.criteria.sort(function(d,e){return d.criteria.s.index<e.criteria.s.index?-1:d.criteria.s.index>e.criteria.s.index?1:0});this.setListeners();for(var a=0;a<this.s.criteria.length;a++){var b=this.s.criteria[a].criteria;b instanceof r?(this.s.criteria[a].index=a,this.s.criteria[a].criteria.s.index=a,this.s.criteria[a].criteria.dom.container.insertBefore(this.dom.add),this._setCriteriaListeners(b),this.s.criteria[a].criteria.rebuild(this.s.criteria[a].criteria.getDetails())):
b instanceof c&&0<b.s.criteria.length?(this.s.criteria[a].index=a,this.s.criteria[a].criteria.s.index=a,this.s.criteria[a].criteria.dom.container.insertBefore(this.dom.add),b.redrawContents(),this._setGroupListeners(b)):(this.s.criteria.splice(a,1),a--)}this.setupLogic()}};c.prototype.redrawLogic=function(){for(var a=0,b=this.s.criteria;a<b.length;a++){var d=b[a];d instanceof c&&d.redrawLogic()}this.setupLogic()};c.prototype.search=function(a,b){return"AND"===this.s.logic?this._andSearch(a,b):"OR"===
// search() dispatches on this group's logic. setupLogic positions the rotated AND/OR
// bar and the clear button alongside the group, hiding them when the group holds
// fewer than two criteria.
this.s.logic?this._orSearch(a,b):!0};c.prototype.setupLogic=function(){this.dom.logicContainer.remove();this.dom.clear.remove();if(1>this.s.criteria.length)this.s.isChild||(this.dom.container.trigger("dtsb-destroy"),this.dom.container.css("margin-left",0));else{var a=this.dom.container.height()-1;this.dom.clear.height("0px");this.dom.logicContainer.append(this.dom.clear).width(a);this.dom.container.prepend(this.dom.logicContainer);this._setLogicListener();this.dom.container.css("margin-left",this.dom.logicContainer.outerHeight(!0));
a=this.dom.logicContainer.offset();var b=a.left,d=this.dom.container.offset().left;b=b-(b-d)-this.dom.logicContainer.outerHeight(!0);this.dom.logicContainer.offset({left:b});b=this.dom.logicContainer.next();a=a.top;b=B(b).offset().top;this.dom.logicContainer.offset({top:a-(a-b)});this.dom.clear.outerHeight(this.dom.logicContainer.height());this._setClearListener()}};c.prototype.setListeners=function(){var a=this;this.dom.add.unbind("click");this.dom.add.on("click.dtsb",function(){a.s.isChild||a.dom.container.prepend(a.dom.logicContainer);
// addCriteria: inserts a new Criteria (class `r`) at the right index, keeping
// this.s.criteria sorted by index; checkFilled/count walk the criteria tree.
a.addCriteria();a.dom.container.trigger("dtsb-add");a.s.dt.state.save();return!1});for(var b=0,d=this.s.criteria;b<d.length;b++)d[b].criteria.setListeners();this._setClearListener();this._setLogicListener()};c.prototype.addCriteria=function(a,b){void 0===a&&(a=null);void 0===b&&(b=!0);var d=null===a?this.s.criteria.length:a.s.index,e=new r(this.s.dt,this.s.opts,this.s.topGroup,d,this.s.depth);null!==a&&(e.c=a.c,e.s=a.s,e.s.depth=this.s.depth,e.classes=a.classes);e.populate();a=!1;for(var f=0;f<this.s.criteria.length;f++)0===
f&&this.s.criteria[f].criteria.s.index>e.s.index?(e.getNode().insertBefore(this.s.criteria[f].criteria.dom.container),a=!0):f<this.s.criteria.length-1&&this.s.criteria[f].criteria.s.index<e.s.index&&this.s.criteria[f+1].criteria.s.index>e.s.index&&(e.getNode().insertAfter(this.s.criteria[f].criteria.dom.container),a=!0);a||e.getNode().insertBefore(this.dom.add);this.s.criteria.push({criteria:e,index:d});this.s.criteria=this.s.criteria.sort(function(g,n){return g.criteria.s.index-n.criteria.s.index});
d=0;for(a=this.s.criteria;d<a.length;d++)f=a[d],f.criteria instanceof r&&f.criteria.updateArrows(1<this.s.criteria.length,b);this._setCriteriaListeners(e);e.setListeners();this.setupLogic()};c.prototype.checkFilled=function(){for(var a=0,b=this.s.criteria;a<b.length;a++){var d=b[a];if(d.criteria instanceof r&&d.criteria.s.filled||d.criteria instanceof c&&d.criteria.checkFilled())return!0}return!1};c.prototype.count=function(){for(var a=0,b=0,d=this.s.criteria;b<d.length;b++){var e=d[b];e.criteria instanceof
// _addPrevGroup/_addPrevCriteria rebuild saved state. _andSearch skips unfilled
// criteria (treats them as vacuously true); _orSearch returns true when no criterion
// is filled at all (!d), otherwise true only if some filled criterion matches.
c?a+=e.criteria.count():a++}return a};c.prototype._addPrevGroup=function(a){var b=this.s.criteria.length,d=new c(this.s.dt,this.c,this.s.topGroup,b,!0,this.s.depth+1);this.s.criteria.push({criteria:d,index:b,logic:d.s.logic});d.rebuild(a);this.s.criteria[b].criteria=d;this.s.topGroup.trigger("dtsb-redrawContents");this._setGroupListeners(d)};c.prototype._addPrevCriteria=function(a){var b=this.s.criteria.length,d=new r(this.s.dt,this.s.opts,this.s.topGroup,b,this.s.depth);d.populate();this.s.criteria.push({criteria:d,
index:b});d.rebuild(a);this.s.criteria[b].criteria=d;this.s.topGroup.trigger("dtsb-redrawContents")};c.prototype._andSearch=function(a,b){if(0===this.s.criteria.length)return!0;for(var d=0,e=this.s.criteria;d<e.length;d++){var f=e[d];if(!(f.criteria instanceof r&&!f.criteria.s.filled||f.criteria.search(a,b)))return!1}return!0};c.prototype._orSearch=function(a,b){if(0===this.s.criteria.length)return!0;for(var d=!1,e=0,f=this.s.criteria;e<f.length;e++){var g=f[e];if(g.criteria instanceof r&&g.criteria.s.filled){if(d=
!0,g.criteria.search(a,b))return!0}else if(g.criteria instanceof c&&g.criteria.checkFilled()&&(d=!0,g.criteria.search(a,b)))return!0}return!d};c.prototype._removeCriteria=function(a,b){void 0===b&&(b=!1);if(1>=this.s.criteria.length&&this.s.isChild)this.destroy();else{for(var d=void 0,e=0;e<this.s.criteria.length;e++)this.s.criteria[e].index===a.s.index&&(!b||this.s.criteria[e].criteria instanceof c)&&(d=e);void 0!==d&&this.s.criteria.splice(d,1);for(e=0;e<this.s.criteria.length;e++)this.s.criteria[e].index=
// _setCriteriaListeners wires the delete button, the indent ("right": wrap the
// criterion in a new sub-group) and the outdent ("left": drop it back into this group
// via the toDrop/dtsb-dropCriteria handshake).
e,this.s.criteria[e].criteria.s.index=e}};c.prototype._setCriteriaListeners=function(a){var b=this;a.dom["delete"].unbind("click").on("click.dtsb",function(){b._removeCriteria(a);a.dom.container.remove();for(var d=0,e=b.s.criteria;d<e.length;d++){var f=e[d];f.criteria instanceof r&&f.criteria.updateArrows(1<b.s.criteria.length)}a.destroy();b.s.dt.draw();b.s.topGroup.trigger("dtsb-redrawContents");return!1});a.dom.right.unbind("click").on("click.dtsb",function(){var d=a.s.index,e=new c(b.s.dt,b.s.opts,
b.s.topGroup,a.s.index,!0,b.s.depth+1);e.addCriteria(a);b.s.criteria[d].criteria=e;b.s.criteria[d].logic="AND";b.s.topGroup.trigger("dtsb-redrawContents");b._setGroupListeners(e);return!1});a.dom.left.unbind("click").on("click.dtsb",function(){b.s.toDrop=new r(b.s.dt,b.s.opts,b.s.topGroup,a.s.index);b.s.toDrop.s=a.s;b.s.toDrop.c=a.c;b.s.toDrop.classes=a.classes;b.s.toDrop.populate();var d=b.s.toDrop.s.index;b.dom.container.trigger("dtsb-dropCriteria");a.s.index=d;b._removeCriteria(a);b.s.topGroup.trigger("dtsb-redrawContents");
b.s.dt.draw();return!1})};c.prototype._setClearListener=function(){var a=this;this.dom.clear.unbind("click").on("click.dtsb",function(){if(!a.s.isChild)return a.dom.container.trigger("dtsb-clearContents"),!1;a.destroy();a.s.topGroup.trigger("dtsb-redrawContents");return!1})};c.prototype._setGroupListeners=function(a){var b=this;a.dom.add.unbind("click").on("click.dtsb",function(){b.setupLogic();b.dom.container.trigger("dtsb-add");return!1});a.dom.container.unbind("dtsb-add").on("dtsb-add.dtsb",function(){b.setupLogic();
b.dom.container.trigger("dtsb-add");return!1});a.dom.container.unbind("dtsb-destroy").on("dtsb-destroy.dtsb",function(){b._removeCriteria(a,!0);a.dom.container.remove();b.setupLogic();return!1});a.dom.container.unbind("dtsb-dropCriteria").on("dtsb-dropCriteria.dtsb",function(){var d=a.s.toDrop;d.s.index=a.s.index;d.updateArrows(1<b.s.criteria.length,!1);b.addCriteria(d,!1);return!1});a.setListeners()};c.prototype._setup=function(){this.setListeners();this.dom.add.html(this.s.dt.i18n("searchBuilder.add",
// _setup assembles the DOM and initial AND/OR logic; _toggleLogic flips between the
// two and updates the button label.
this.c.i18n.add));this.dom.logic.children().first().html("OR"===this.c.logic?this.s.dt.i18n("searchBuilder.logicOr",this.c.i18n.logicOr):this.s.dt.i18n("searchBuilder.logicAnd",this.c.i18n.logicAnd));this.s.logic="OR"===this.c.logic?"OR":"AND";this.c.greyscale&&this.dom.logic.addClass(this.classes.greyscale);this.dom.logicContainer.append(this.dom.logic).append(this.dom.clear);this.s.isChild&&this.dom.container.append(this.dom.logicContainer);this.dom.container.append(this.dom.add)};c.prototype._setLogicListener=
function(){var a=this;this.dom.logic.unbind("click").on("click.dtsb",function(){a._toggleLogic();a.s.dt.draw();for(var b=0,d=a.s.criteria;b<d.length;b++)d[b].criteria.setListeners()})};c.prototype._toggleLogic=function(){"OR"===this.s.logic?(this.s.logic="AND",this.dom.logic.children().first().html(this.s.dt.i18n("searchBuilder.logicAnd",this.c.i18n.logicAnd))):"AND"===this.s.logic&&(this.s.logic="OR",this.dom.logic.children().first().html(this.s.dt.i18n("searchBuilder.logicOr",this.c.i18n.logicOr)))};
// Static version/classes/defaults for the Group class; its condition defaults mirror
// the Criteria tables (class `r`) defined earlier in the file.
c.version="1.1.0";c.classes={add:"dtsb-add",button:"dtsb-button",clearGroup:"dtsb-clearGroup",greyscale:"dtsb-greyscale",group:"dtsb-group",inputButton:"dtsb-iptbtn",logic:"dtsb-logic",logicContainer:"dtsb-logicContainer"};c.defaults={columns:!0,conditions:{date:r.dateConditions,html:r.stringConditions,"html-num":r.numConditions,"html-num-fmt":r.numFmtConditions,luxon:r.luxonDateConditions,moment:r.momentDateConditions,num:r.numConditions,"num-fmt":r.numFmtConditions,string:r.stringConditions},depthLimit:!1,
enterSearch:!1,filterChanged:void 0,greyscale:!1,i18n:{add:"Add Condition",button:{0:"Search Builder",_:"Search Builder (%d)"},clearAll:"Clear All",condition:"Condition",data:"Data","delete":"×",deleteTitle:"Delete filtering rule",left:"<",leftTitle:"Outdent criteria",logicAnd:"And",logicOr:"Or",right:">",rightTitle:"Indent criteria",title:{0:"Custom Search Builder",_:"Custom Search Builder (%d)"},value:"Value",valueJoiner:"and"},logic:"AND",orthogonal:{display:"display",search:"filter"},preDefined:!1};
// The Group IIFE returns on this line; `I` (the top-level SearchBuilder class, also
// requiring DataTables >= 1.10) opens here and continues past this chunk.
return c}(),x,C,I=function(){function c(a,b){var d=this;if(!C||!C.versionCheck||!C.versionCheck("1.10.0"))throw Error("SearchBuilder requires DataTables 1.10 or newer");a=new C.Api(a);this.classes=x.extend(!0,{},c.classes);this.c=x.extend(!0,{},c.defaults,b);this.dom={clearAll:x('<button type="button">'+a.i18n("searchBuilder.clearAll",this.c.i18n.clearAll)+"</button>").addClass(this.classes.clearAll).addClass(this.classes.button).attr("type","button"),container:x("<div/>").addClass(this.classes.container),
title:x("<div/>").addClass(this.classes.title),titleRow:x("<div/>").addClass(this.classes.titleRow),topGroup:void 0};this.s={dt:a,opts:b,search:void 0,topGroup:void 0};if(void 0===a.settings()[0]._searchBuilder){a.settings()[0]._searchBuilder=this;if(this.s.dt.page.info().serverSide)this.s.dt.on("preXhr.dtsb",function(e,f,g){(e=d.s.dt.state.loaded())&&e.searchBuilder&&(g.searchBuilder=d._collapseArray(e.searchBuilder))});if(this.s.dt.settings()[0]._bInitComplete)this._setUp();else a.one("init.dt",
function(){d._setUp()});return this}}c.prototype.getDetails=function(a){void 0===a&&(a=!1);return this.s.topGroup.getDetails(a)};c.prototype.getNode=function(){return this.dom.container};c.prototype.rebuild=function(a){this.dom.clearAll.click();if(void 0===a||null===a)return this;this.s.topGroup.s.preventRedraw=!0;this.s.topGroup.rebuild(a);this.s.topGroup.s.preventRedraw=!1;this.s.topGroup.redrawContents();this.s.dt.draw(!1);this.s.topGroup.setListeners();return this};c.prototype._applyPreDefDefaults=
function(a){var b=this;void 0!==a.criteria&&void 0===a.logic&&(a.logic="AND");for(var d=function(n){void 0!==n.criteria?n=e._applyPreDefDefaults(n):e.s.dt.columns().every(function(q){b.s.dt.settings()[0].aoColumns[q].sTitle===n.data&&(n.dataIdx=q)})},e=this,f=0,g=a.criteria;f<g.length;f++)d(g[f]);return a};c.prototype._setUp=function(a){var b=this;void 0===a&&(a=!0);x.fn.DataTable.Api.registerPlural("columns().type()","column().type()",function(){return this.iterator("column",function(n,q){return n.aoColumns[q].sType},
1)});if(!C.DateTime){var d=this.s.dt.columns().type().toArray();if(void 0===d||d.includes(void 0)||d.includes(null)){d=[];for(var e=0,f=this.s.dt.settings()[0].aoColumns;e<f.length;e++){var g=f[e];d.push(void 0!==g.searchBuilderType?g.searchBuilderType:g.sType)}}e=this.s.dt.columns().toArray();if(void 0===d||d.includes(void 0)||d.includes(null))x.fn.dataTable.ext.oApi._fnColumnTypes(this.s.dt.settings()[0]),d=this.s.dt.columns().type().toArray();for(f=0;f<e[0].length;f++)if(g=d[e[0][f]],(!0===this.c.columns||
Array.isArray(this.c.columns)&&this.c.columns.includes(f))&&(g.includes("date")||g.includes("moment")||g.includes("luxon")))throw alert("SearchBuilder Requires DateTime when used with dates."),Error("SearchBuilder requires DateTime");}this.s.topGroup=new G(this.s.dt,this.c,void 0);this._setClearListener();this.s.dt.on("stateSaveParams.dtsb",function(n,q,u){u.searchBuilder=b.getDetails();u.page=b.s.dt.page()});this.s.dt.on("stateLoadParams.dtsb",function(n,q,u){b.rebuild(u.searchBuilder)});this._build();
this.s.dt.on("preXhr.dtsb",function(n,q,u){b.s.dt.page.info().serverSide&&(u.searchBuilder=b._collapseArray(b.getDetails(!0)))});this.s.dt.on("column-reorder",function(){b.rebuild(b.getDetails())});a&&(a=this.s.dt.state.loaded(),null!==a&&void 0!==a.searchBuilder?(this.s.topGroup.rebuild(a.searchBuilder),this.s.topGroup.dom.container.trigger("dtsb-redrawContents"),this.s.dt.page.info().serverSide||this.s.dt.page(a.page).draw("page"),this.s.topGroup.setListeners()):!1!==this.c.preDefined&&(this.c.preDefined=
this._applyPreDefDefaults(this.c.preDefined),this.rebuild(this.c.preDefined)));this._setEmptyListener();this.s.dt.state.save()};c.prototype._collapseArray=function(a){if(void 0===a.logic)void 0!==a.value&&(a.value.sort(function(d,e){isNaN(+d)||(d=+d,e=+e);return d<e?-1:e<d?1:0}),a.value1=a.value[0],a.value2=a.value[1]);else for(var b=0;b<a.criteria.length;b++)a.criteria[b]=this._collapseArray(a.criteria[b]);return a};c.prototype._updateTitle=function(a){this.dom.title.html(this.s.dt.i18n("searchBuilder.title",
this.c.i18n.title,a))};c.prototype._build=function(){var a=this;this.dom.clearAll.remove();this.dom.container.empty();var b=this.s.topGroup.count();this._updateTitle(b);this.dom.titleRow.append(this.dom.title);this.dom.container.append(this.dom.titleRow);this.dom.topGroup=this.s.topGroup.getNode();this.dom.container.append(this.dom.topGroup);this._setRedrawListener();var d=this.s.dt.table(0).node();x.fn.dataTable.ext.search.includes(this.s.search)||(this.s.search=function(e,f,g){return e.nTable!==
d?!0:a.s.topGroup.search(f,g)},x.fn.dataTable.ext.search.push(this.s.search));this.s.dt.on("destroy.dtsb",function(){a.dom.container.remove();a.dom.clearAll.remove();for(var e=x.fn.dataTable.ext.search.indexOf(a.s.search);-1!==e;)x.fn.dataTable.ext.search.splice(e,1),e=x.fn.dataTable.ext.search.indexOf(a.s.search);a.s.dt.off(".dtsb");x(a.s.dt.table().node()).off(".dtsb")})};c.prototype._checkClear=function(){0<this.s.topGroup.s.criteria.length?(this.dom.clearAll.insertAfter(this.dom.title),this._setClearListener()):
this.dom.clearAll.remove()};c.prototype._filterChanged=function(a){var b=this.c.filterChanged;"function"===typeof b&&b(a,this.s.dt.i18n("searchBuilder.button",this.c.i18n.button,a))};c.prototype._setClearListener=function(){var a=this;this.dom.clearAll.unbind("click");this.dom.clearAll.on("click.dtsb",function(){a.s.topGroup=new G(a.s.dt,a.c,void 0);a._build();a.s.dt.draw();a.s.topGroup.setListeners();a.dom.clearAll.remove();a._setEmptyListener();a._filterChanged(0);return!1})};c.prototype._setRedrawListener=
function(){var a=this;this.s.topGroup.dom.container.unbind("dtsb-redrawContents");this.s.topGroup.dom.container.on("dtsb-redrawContents.dtsb",function(){a._checkClear();a.s.topGroup.redrawContents();a.s.topGroup.setupLogic();a._setEmptyListener();var b=a.s.topGroup.count();a._updateTitle(b);a._filterChanged(b);a.s.dt.page.info().serverSide||a.s.dt.draw();a.s.dt.state.save()});this.s.topGroup.dom.container.unbind("dtsb-redrawLogic");this.s.topGroup.dom.container.on("dtsb-redrawLogic.dtsb",function(){a.s.topGroup.redrawLogic();
var b=a.s.topGroup.count();a._updateTitle(b);a._filterChanged(b)});this.s.topGroup.dom.container.unbind("dtsb-add");this.s.topGroup.dom.container.on("dtsb-add.dtsb",function(){var b=a.s.topGroup.count();a._updateTitle(b);a._filterChanged(b)});this.s.dt.on("postEdit.dtsb postCreate.dtsb postRemove.dtsb",function(){a.s.topGroup.redrawContents()});this.s.topGroup.dom.container.unbind("dtsb-clearContents");this.s.topGroup.dom.container.on("dtsb-clearContents.dtsb",function(){a._setUp(!1);a._filterChanged(0);
a.s.dt.draw()})};c.prototype._setEmptyListener=function(){var a=this;this.s.topGroup.dom.add.on("click.dtsb",function(){a._checkClear()});this.s.topGroup.dom.container.on("dtsb-destroy.dtsb",function(){a.dom.clearAll.remove()})};c.version="1.3.1";c.classes={button:"dtsb-button",clearAll:"dtsb-clearAll",container:"dtsb-searchBuilder",inputButton:"dtsb-iptbtn",title:"dtsb-title",titleRow:"dtsb-titleRow"};c.defaults={columns:!0,conditions:{date:r.dateConditions,html:r.stringConditions,"html-num":r.numConditions,
"html-num-fmt":r.numFmtConditions,luxon:r.luxonDateConditions,moment:r.momentDateConditions,num:r.numConditions,"num-fmt":r.numFmtConditions,string:r.stringConditions},depthLimit:!1,enterSearch:!1,filterChanged:void 0,greyscale:!1,i18n:{add:"Add Condition",button:{0:"Search Builder",_:"Search Builder (%d)"},clearAll:"Clear All",condition:"Condition",conditions:{array:{contains:"Contains",empty:"Empty",equals:"Equals",not:"Not",notEmpty:"Not Empty",without:"Without"},date:{after:"After",before:"Before",
between:"Between",empty:"Empty",equals:"Equals",not:"Not",notBetween:"Not Between",notEmpty:"Not Empty"},number:{between:"Between",empty:"Empty",equals:"Equals",gt:"Greater Than",gte:"Greater Than Equal To",lt:"Less Than",lte:"Less Than Equal To",not:"Not",notBetween:"Not Between",notEmpty:"Not Empty"},string:{contains:"Contains",empty:"Empty",endsWith:"Ends With",equals:"Equals",not:"Not",notContains:"Does Not Contain",notEmpty:"Not Empty",notEndsWith:"Does Not End With",notStartsWith:"Does Not Start With",
startsWith:"Starts With"}},data:"Data","delete":"×",deleteTitle:"Delete filtering rule",left:"<",leftTitle:"Outdent criteria",logicAnd:"And",logicOr:"Or",right:">",rightTitle:"Indent criteria",title:{0:"Custom Search Builder",_:"Custom Search Builder (%d)"},value:"Value",valueJoiner:"and"},logic:"AND",orthogonal:{display:"display",search:"filter"},preDefined:!1};return c}();(function(c){"function"===typeof define&&define.amd?define(["jquery","datatables.net"],function(a){return c(a,window,document)}):
"object"===typeof exports?module.exports=function(a,b){a||(a=window);b&&b.fn.dataTable||(b=require("datatables.net")(a,b).$);return c(b,a,a.document)}:c(window.jQuery,window,document)})(function(c,a,b){function d(f,g){f=new e.Api(f);g=g?g:f.init().searchBuilder||e.defaults.searchBuilder;return(new I(f,g)).getNode()}l(c);m(c);k(c);var e=c.fn.dataTable;c.fn.dataTable.SearchBuilder=I;c.fn.DataTable.SearchBuilder=I;c.fn.dataTable.Group=G;c.fn.DataTable.Group=G;c.fn.dataTable.Criteria=r;c.fn.DataTable.Criteria=
r;a=c.fn.dataTable.Api.register;c.fn.dataTable.ext.searchBuilder={conditions:{}};c.fn.dataTable.ext.buttons.searchBuilder={action:function(f,g,n,q){this.popover(q._searchBuilder.getNode(),{align:"container",span:"container"});void 0!==q._searchBuilder.s.topGroup&&q._searchBuilder.s.topGroup.dom.container.trigger("dtsb-redrawContents");0===q._searchBuilder.s.topGroup.s.criteria.length&&c("."+c.fn.dataTable.Group.classes.add).click()},config:{},init:function(f,g,n){var q=new c.fn.dataTable.SearchBuilder(f,
c.extend({filterChanged:function(u,D){f.button(g).text(D)}},n.config));f.button(g).text(n.text||f.i18n("searchBuilder.button",q.c.i18n.button,0));n._searchBuilder=q},text:null};a("searchBuilder.getDetails()",function(f){void 0===f&&(f=!1);var g=this.context[0];return g._searchBuilder?g._searchBuilder.getDetails(f):null});a("searchBuilder.rebuild()",function(f){var g=this.context[0];if(void 0===g._searchBuilder)return null;g._searchBuilder.rebuild(f);return this});a("searchBuilder.container()",function(){var f=
this.context[0];return f._searchBuilder?f._searchBuilder.getNode():null});c(b).on("preInit.dt.dtsp",function(f,g){"dt"===f.namespace&&(g.oInit.searchBuilder||e.defaults.searchBuilder)&&(g._searchBuilder||d(g))});e.ext.feature.push({cFeature:"Q",fnInit:d});e.ext.features&&e.ext.features.register("searchBuilder",d)})})(); | PypiClean |
/MyoSuite-2.0.1-py3-none-any.whl/myosuite/envs/myo/myobase/obj_hold_v0.py | import collections
import numpy as np
import gym
from myosuite.envs.myo.base_v0 import BaseV0
class ObjHoldFixedEnvV0(BaseV0):
    """Myo hand task: hold an object at a fixed goal site.

    Observations combine the hand joint state with the object position and
    the object-to-goal error. The reward favors keeping the object close to
    the goal site, grants a bonus near the goal, and penalizes dropping the
    object far away.
    """

    DEFAULT_OBS_KEYS = ['hand_qpos', 'hand_qvel', 'obj_pos', 'obj_err']
    DEFAULT_RWD_KEYS_AND_WEIGHTS = {
        "goal_dist": 100.0,
        "bonus": 4.0,
        "penalty": 10,
    }

    def __init__(self, model_path, obsd_model_path=None, seed=None, **kwargs):
        # EzPickle.__init__(**locals()) is capturing the input dictionary of the init method of this class.
        # In order to successfully capture all arguments we need to call gym.utils.EzPickle.__init__(**locals())
        # at the leaf level, when we do inheritance like we do here.
        # kwargs is needed at the top level to account for injection of __class__ keyword.
        # Also see: https://github.com/openai/gym/pull/1497
        gym.utils.EzPickle.__init__(self, model_path, obsd_model_path, seed, **kwargs)

        # This two step construction is required for pickling to work correctly. All arguments to all __init__
        # calls must be pickle friendly. Things like sim / sim_obsd are NOT pickle friendly. Therefore we
        # first construct the inheritance chain, which is just __init__ calls all the way down, with env_base
        # creating the sim / sim_obsd instances. Next we run through "setup" which relies on sim / sim_obsd
        # created in __init__ to complete the setup.
        super().__init__(model_path=model_path, obsd_model_path=obsd_model_path, seed=seed, env_credits=self.MYO_CREDIT)
        self._setup(**kwargs)

    def _setup(self,
            obs_keys:list = DEFAULT_OBS_KEYS,
            weighted_reward_keys:list = DEFAULT_RWD_KEYS_AND_WEIGHTS,
            **kwargs,
        ):
        """Cache object/goal site ids and the initial object pose, then finish base setup."""
        self.object_sid = self.sim.model.site_name2id("object")
        self.goal_sid = self.sim.model.site_name2id("goal")
        self.object_init_pos = self.sim.data.site_xpos[self.object_sid].copy()
        super()._setup(obs_keys=obs_keys,
                    weighted_reward_keys=weighted_reward_keys,
                    **kwargs,
        )
        self.init_qpos[:-7] *= 0  # Use fully open as init pos
        self.init_qpos[0] = -1.5  # place palm up

    def get_obs_vec(self):
        """Refresh self.obs_dict from the current sim and return the flattened obs vector."""
        # Delegate to get_obs_dict so the observation recipe lives in one place;
        # update() preserves any extra keys other components stored in obs_dict.
        self.obs_dict.update(self.get_obs_dict(self.sim))
        t, obs = self.obsdict2obsvec(self.obs_dict, self.obs_keys)
        return obs

    def get_obs_dict(self, sim):
        """Build the observation dictionary from the given simulator state.

        :param object sim: simulator handle exposing .data / .model
        :return: dict of named observation arrays
        """
        obs_dict = {}
        obs_dict['time'] = np.array([sim.data.time])
        # Last 7 qpos / 6 qvel dofs belong to the free-floating object, not the hand.
        obs_dict['hand_qpos'] = sim.data.qpos[:-7].copy()
        obs_dict['hand_qvel'] = sim.data.qvel[:-6].copy()*self.dt
        obs_dict['obj_pos'] = sim.data.site_xpos[self.object_sid]
        obs_dict['obj_err'] = sim.data.site_xpos[self.goal_sid] - sim.data.site_xpos[self.object_sid]
        if sim.model.na > 0:
            obs_dict['act'] = sim.data.act[:].copy()
        return obs_dict

    def get_reward_dict(self, obs_dict):
        """Compute the weighted reward terms from the observation dictionary.

        :param dict obs_dict: observations as produced by get_obs_dict
        :return: OrderedDict of reward terms, including 'dense', 'sparse', 'solved', 'done'
        """
        # Use the obs_dict argument (the base class passes self.obs_dict here);
        # norm() is already non-negative, so no abs() is needed.
        goal_dist = np.linalg.norm(obs_dict['obj_err'], axis=-1)
        act_mag = np.linalg.norm(obs_dict['act'], axis=-1)/self.sim.model.na if self.sim.model.na != 0 else 0
        goal_th = .010   # success threshold in meters (typo 'gaol_th' fixed)
        drop = goal_dist > 0.300  # object is considered dropped beyond 30 cm
        rwd_dict = collections.OrderedDict((
            # Optional Keys
            ('goal_dist', -1.*goal_dist),
            ('bonus', 1.*(goal_dist < 2*goal_th) + 1.*(goal_dist < goal_th)),
            ('act_reg', -1.*act_mag),
            ('penalty', -1.*drop),
            # Must keys
            ('sparse', -goal_dist),
            ('solved', goal_dist < goal_th),
            ('done', drop),
        ))
        rwd_dict['dense'] = np.sum([wt*rwd_dict[key] for key, wt in self.rwd_keys_wt.items()], axis=0)
        return rwd_dict
class ObjHoldRandomEnvV0(ObjHoldFixedEnvV0):
    """Variant of the fixed hold task with a randomized goal site and object size."""

    def reset(self):
        """Randomize goal position and object size, then run the base reset."""
        # Jitter the goal site around the object's initial position (+/- 3 cm per axis).
        goal_jitter = self.np_random.uniform(low=np.array([-.030, -.030, -.030]),
                                             high=np.array([0.030, 0.030, 0.030]))
        self.sim.model.site_pos[self.goal_sid] = self.object_init_pos + goal_jitter
        # Draw a random object size (2-3 cm per axis) and keep the goal marker matched to it.
        rand_size = self.np_random.uniform(low=np.array([.020, .020, .020]),
                                           high=np.array([0.030, 0.030, 0.030]))
        self.sim.model.geom_size[-1] = rand_size
        self.sim.model.site_size[self.goal_sid] = rand_size
        self.robot.sync_sims(self.sim, self.sim_obsd)
        return super().reset()
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/pouchdb/dist/pouchdb.find.min.js | !function e(t,n,r){function i(u,s){if(!n[u]){if(!t[u]){var a="function"==typeof require&&require;if(!s&&a)return a(u,!0);if(o)return o(u,!0);var c=new Error("Cannot find module '"+u+"'");throw c.code="MODULE_NOT_FOUND",c}var f=n[u]={exports:{}};t[u][0].call(f.exports,function(e){var n=t[u][1][e];return i(n||e)},f,f.exports,e,t,n,r)}return n[u].exports}for(var o="function"==typeof require&&require,u=0;u<r.length;u++)i(r[u]);return i}({1:[function(e,t,n){"use strict";t.exports=function(e){return function(){var t=arguments.length;if(t){for(var n=[],r=-1;++r<t;)n[r]=arguments[r];return e.call(this,n)}return e.call(this,[])}}},{}],2:[function(e,t,n){function r(){this._events=this._events||{},this._maxListeners=this._maxListeners||void 0}function i(e){return"function"==typeof e}function o(e){return"object"==typeof e&&null!==e}function u(e){return void 0===e}t.exports=r,r.EventEmitter=r,r.prototype._events=void 0,r.prototype._maxListeners=void 0,r.defaultMaxListeners=10,r.prototype.setMaxListeners=function(e){if(!function(e){return"number"==typeof e}(e)||e<0||isNaN(e))throw TypeError("n must be a positive number");return this._maxListeners=e,this},r.prototype.emit=function(e){var t,n,r,s,a,c;if(this._events||(this._events={}),"error"===e&&(!this._events.error||o(this._events.error)&&!this._events.error.length)){if((t=arguments[1])instanceof Error)throw t;var f=new Error('Uncaught, unspecified "error" event. 
('+t+")");throw f.context=t,f}if(n=this._events[e],u(n))return!1;if(i(n))switch(arguments.length){case 1:n.call(this);break;case 2:n.call(this,arguments[1]);break;case 3:n.call(this,arguments[1],arguments[2]);break;default:s=Array.prototype.slice.call(arguments,1),n.apply(this,s)}else if(o(n))for(s=Array.prototype.slice.call(arguments,1),r=(c=n.slice()).length,a=0;a<r;a++)c[a].apply(this,s);return!0},r.prototype.addListener=function(e,t){var n;if(!i(t))throw TypeError("listener must be a function");return this._events||(this._events={}),this._events.newListener&&this.emit("newListener",e,i(t.listener)?t.listener:t),this._events[e]?o(this._events[e])?this._events[e].push(t):this._events[e]=[this._events[e],t]:this._events[e]=t,o(this._events[e])&&!this._events[e].warned&&(n=u(this._maxListeners)?r.defaultMaxListeners:this._maxListeners)&&n>0&&this._events[e].length>n&&(this._events[e].warned=!0,console.error("(node) warning: possible EventEmitter memory leak detected. %d listeners added. 
Use emitter.setMaxListeners() to increase limit.",this._events[e].length),"function"==typeof console.trace&&console.trace()),this},r.prototype.on=r.prototype.addListener,r.prototype.once=function(e,t){function n(){this.removeListener(e,n),r||(r=!0,t.apply(this,arguments))}if(!i(t))throw TypeError("listener must be a function");var r=!1;return n.listener=t,this.on(e,n),this},r.prototype.removeListener=function(e,t){var n,r,u,s;if(!i(t))throw TypeError("listener must be a function");if(!this._events||!this._events[e])return this;if(n=this._events[e],u=n.length,r=-1,n===t||i(n.listener)&&n.listener===t)delete this._events[e],this._events.removeListener&&this.emit("removeListener",e,t);else if(o(n)){for(s=u;s-- >0;)if(n[s]===t||n[s].listener&&n[s].listener===t){r=s;break}if(r<0)return this;1===n.length?(n.length=0,delete this._events[e]):n.splice(r,1),this._events.removeListener&&this.emit("removeListener",e,t)}return this},r.prototype.removeAllListeners=function(e){var t,n;if(!this._events)return this;if(!this._events.removeListener)return 0===arguments.length?this._events={}:this._events[e]&&delete this._events[e],this;if(0===arguments.length){for(t in this._events)"removeListener"!==t&&this.removeAllListeners(t);return this.removeAllListeners("removeListener"),this._events={},this}if(n=this._events[e],i(n))this.removeListener(e,n);else if(n)for(;n.length;)this.removeListener(e,n[n.length-1]);return delete this._events[e],this},r.prototype.listeners=function(e){return this._events&&this._events[e]?i(this._events[e])?[this._events[e]]:this._events[e].slice():[]},r.prototype.listenerCount=function(e){if(this._events){var t=this._events[e];if(i(t))return 1;if(t)return t.length}return 0},r.listenerCount=function(e,t){return e.listenerCount(t)}},{}],3:[function(e,t,n){(function(e){"use strict";function n(){c=!0;for(var e,t,n=f.length;n;){for(t=f,f=[],e=-1;++e<n;)t[e]();n=f.length}c=!1}var r,i=e.MutationObserver||e.WebKitMutationObserver;if(i){var o=0,u=new 
i(n),s=e.document.createTextNode("");u.observe(s,{characterData:!0}),r=function(){s.data=o=++o%2}}else if(e.setImmediate||void 0===e.MessageChannel)r="document"in e&&"onreadystatechange"in e.document.createElement("script")?function(){var t=e.document.createElement("script");t.onreadystatechange=function(){n(),t.onreadystatechange=null,t.parentNode.removeChild(t),t=null},e.document.documentElement.appendChild(t)}:function(){setTimeout(n,0)};else{var a=new e.MessageChannel;a.port1.onmessage=n,r=function(){a.port2.postMessage(0)}}var c,f=[];t.exports=function(e){1!==f.push(e)||c||r()}}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],4:[function(e,t,n){"function"==typeof Object.create?t.exports=function(e,t){e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}})}:t.exports=function(e,t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new n,e.prototype.constructor=e}},{}],5:[function(e,t,n){"use strict";function r(){}function i(e){if("function"!=typeof e)throw new TypeError("resolver must be a function");this.state=p,this.queue=[],this.outcome=void 0,e!==r&&a(this,e)}function o(e,t,n){this.promise=e,"function"==typeof t&&(this.onFulfilled=t,this.callFulfilled=this.otherCallFulfilled),"function"==typeof n&&(this.onRejected=n,this.callRejected=this.otherCallRejected)}function u(e,t,n){f(function(){var r;try{r=t(n)}catch(t){return l.reject(e,t)}r===e?l.reject(e,new TypeError("Cannot resolve promise with itself")):l.resolve(e,r)})}function s(e){var t=e&&e.then;if(e&&("object"==typeof e||"function"==typeof e)&&"function"==typeof t)return function(){t.apply(e,arguments)}}function a(e,t){function n(t){i||(i=!0,l.reject(e,t))}function r(t){i||(i=!0,l.resolve(e,t))}var i=!1,o=c(function(){t(r,n)});"error"===o.status&&n(o.value)}function c(e,t){var n={};try{n.value=e(t),n.status="success"}catch(e){n.status="error",n.value=e}return 
n}var f=e(3),l={},d=["REJECTED"],h=["FULFILLED"],p=["PENDING"];t.exports=i,i.prototype.catch=function(e){return this.then(null,e)},i.prototype.then=function(e,t){if("function"!=typeof e&&this.state===h||"function"!=typeof t&&this.state===d)return this;var n=new this.constructor(r);if(this.state!==p){u(n,this.state===h?e:t,this.outcome)}else this.queue.push(new o(n,e,t));return n},o.prototype.callFulfilled=function(e){l.resolve(this.promise,e)},o.prototype.otherCallFulfilled=function(e){u(this.promise,this.onFulfilled,e)},o.prototype.callRejected=function(e){l.reject(this.promise,e)},o.prototype.otherCallRejected=function(e){u(this.promise,this.onRejected,e)},l.resolve=function(e,t){var n=c(s,t);if("error"===n.status)return l.reject(e,n.value);var r=n.value;if(r)a(e,r);else{e.state=h,e.outcome=t;for(var i=-1,o=e.queue.length;++i<o;)e.queue[i].callFulfilled(t)}return e},l.reject=function(e,t){e.state=d,e.outcome=t;for(var n=-1,r=e.queue.length;++n<r;)e.queue[n].callRejected(t);return e},i.resolve=function(e){return e instanceof this?e:l.resolve(new this(r),e)},i.reject=function(e){var t=new this(r);return l.reject(t,e)},i.all=function(e){var t=this;if("[object Array]"!==Object.prototype.toString.call(e))return this.reject(new TypeError("must be an array"));var n=e.length,i=!1;if(!n)return this.resolve([]);for(var o=new Array(n),u=0,s=-1,a=new this(r);++s<n;)!function(e,r){t.resolve(e).then(function(e){o[r]=e,++u!==n||i||(i=!0,l.resolve(a,o))},function(e){i||(i=!0,l.reject(a,e))})}(e[s],s);return a},i.race=function(e){var t=this;if("[object Array]"!==Object.prototype.toString.call(e))return this.reject(new TypeError("must be an array"));var n=e.length,i=!1;if(!n)return this.resolve([]);for(var o=-1,u=new this(r);++o<n;)!function(e){t.resolve(e).then(function(e){i||(i=!0,l.resolve(u,e))},function(e){i||(i=!0,l.reject(u,e))})}(e[o]);return u}},{3:3}],6:[function(e,t,n){function r(){throw new Error("setTimeout has not been defined")}function i(){throw new 
Error("clearTimeout has not been defined")}function o(e){if(f===setTimeout)return setTimeout(e,0);if((f===r||!f)&&setTimeout)return f=setTimeout,setTimeout(e,0);try{return f(e,0)}catch(t){try{return f.call(null,e,0)}catch(t){return f.call(this,e,0)}}}function u(){v&&h&&(v=!1,h.length?p=h.concat(p):y=-1,p.length&&s())}function s(){if(!v){var e=o(u);v=!0;for(var t=p.length;t;){for(h=p,p=[];++y<t;)h&&h[y].run();y=-1,t=p.length}h=null,v=!1,function(e){if(l===clearTimeout)return clearTimeout(e);if((l===i||!l)&&clearTimeout)return l=clearTimeout,clearTimeout(e);try{l(e)}catch(t){try{return l.call(null,e)}catch(t){return l.call(this,e)}}}(e)}}function a(e,t){this.fun=e,this.array=t}function c(){}var f,l,d=t.exports={};!function(){try{f="function"==typeof setTimeout?setTimeout:r}catch(e){f=r}try{l="function"==typeof clearTimeout?clearTimeout:i}catch(e){l=i}}();var h,p=[],v=!1,y=-1;d.nextTick=function(e){var t=new Array(arguments.length-1);if(arguments.length>1)for(var n=1;n<arguments.length;n++)t[n-1]=arguments[n];p.push(new a(e,t)),1!==p.length||v||o(s)},a.prototype.run=function(){this.fun.apply(null,this.array)},d.title="browser",d.browser=!0,d.env={},d.argv=[],d.version="",d.versions={},d.on=c,d.addListener=c,d.once=c,d.off=c,d.removeListener=c,d.removeAllListeners=c,d.emit=c,d.prependListener=c,d.prependOnceListener=c,d.listeners=function(e){return[]},d.binding=function(e){throw new Error("process.binding is not supported")},d.cwd=function(){return"/"},d.chdir=function(e){throw new Error("process.chdir is not supported")},d.umask=function(){return 0}},{}],7:[function(e,t,n){!function(e){if("object"==typeof n)t.exports=e();else if("function"==typeof define&&define.amd)define(e);else{var r;try{r=window}catch(e){r=self}r.SparkMD5=e()}}(function(e){"use strict";function t(e,t){var 
n=e[0],r=e[1],i=e[2],o=e[3];r=((r+=((i=((i+=((o=((o+=((n=((n+=(r&i|~r&o)+t[0]-680876936|0)<<7|n>>>25)+r|0)&r|~n&i)+t[1]-389564586|0)<<12|o>>>20)+n|0)&n|~o&r)+t[2]+606105819|0)<<17|i>>>15)+o|0)&o|~i&n)+t[3]-1044525330|0)<<22|r>>>10)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r&i|~r&o)+t[4]-176418897|0)<<7|n>>>25)+r|0)&r|~n&i)+t[5]+1200080426|0)<<12|o>>>20)+n|0)&n|~o&r)+t[6]-1473231341|0)<<17|i>>>15)+o|0)&o|~i&n)+t[7]-45705983|0)<<22|r>>>10)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r&i|~r&o)+t[8]+1770035416|0)<<7|n>>>25)+r|0)&r|~n&i)+t[9]-1958414417|0)<<12|o>>>20)+n|0)&n|~o&r)+t[10]-42063|0)<<17|i>>>15)+o|0)&o|~i&n)+t[11]-1990404162|0)<<22|r>>>10)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r&i|~r&o)+t[12]+1804603682|0)<<7|n>>>25)+r|0)&r|~n&i)+t[13]-40341101|0)<<12|o>>>20)+n|0)&n|~o&r)+t[14]-1502002290|0)<<17|i>>>15)+o|0)&o|~i&n)+t[15]+1236535329|0)<<22|r>>>10)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r&o|i&~o)+t[1]-165796510|0)<<5|n>>>27)+r|0)&i|r&~i)+t[6]-1069501632|0)<<9|o>>>23)+n|0)&r|n&~r)+t[11]+643717713|0)<<14|i>>>18)+o|0)&n|o&~n)+t[0]-373897302|0)<<20|r>>>12)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r&o|i&~o)+t[5]-701558691|0)<<5|n>>>27)+r|0)&i|r&~i)+t[10]+38016083|0)<<9|o>>>23)+n|0)&r|n&~r)+t[15]-660478335|0)<<14|i>>>18)+o|0)&n|o&~n)+t[4]-405537848|0)<<20|r>>>12)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r&o|i&~o)+t[9]+568446438|0)<<5|n>>>27)+r|0)&i|r&~i)+t[14]-1019803690|0)<<9|o>>>23)+n|0)&r|n&~r)+t[3]-187363961|0)<<14|i>>>18)+o|0)&n|o&~n)+t[8]+1163531501|0)<<20|r>>>12)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r&o|i&~o)+t[13]-1444681467|0)<<5|n>>>27)+r|0)&i|r&~i)+t[2]-51403784|0)<<9|o>>>23)+n|0)&r|n&~r)+t[7]+1735328473|0)<<14|i>>>18)+o|0)&n|o&~n)+t[12]-1926607734|0)<<20|r>>>12)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r^i^o)+t[5]-378558|0)<<4|n>>>28)+r|0)^r^i)+t[8]-2022574463|0)<<11|o>>>21)+n|0)^n^r)+t[11]+1839030562|0)<<16|i>>>16)+o|0)^o^n)+t[14]-35309556|0)<<23|r>>>9)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r^i^o)+t[1]-1530992060|0)<<4|n>>>28)+r|0)^r^i)+t[4]+1272893353|0)<<11|o>
>>21)+n|0)^n^r)+t[7]-155497632|0)<<16|i>>>16)+o|0)^o^n)+t[10]-1094730640|0)<<23|r>>>9)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r^i^o)+t[13]+681279174|0)<<4|n>>>28)+r|0)^r^i)+t[0]-358537222|0)<<11|o>>>21)+n|0)^n^r)+t[3]-722521979|0)<<16|i>>>16)+o|0)^o^n)+t[6]+76029189|0)<<23|r>>>9)+i|0,r=((r+=((i=((i+=((o=((o+=((n=((n+=(r^i^o)+t[9]-640364487|0)<<4|n>>>28)+r|0)^r^i)+t[12]-421815835|0)<<11|o>>>21)+n|0)^n^r)+t[15]+530742520|0)<<16|i>>>16)+o|0)^o^n)+t[2]-995338651|0)<<23|r>>>9)+i|0,r=((r+=((o=((o+=(r^((n=((n+=(i^(r|~o))+t[0]-198630844|0)<<6|n>>>26)+r|0)|~i))+t[7]+1126891415|0)<<10|o>>>22)+n|0)^((i=((i+=(n^(o|~r))+t[14]-1416354905|0)<<15|i>>>17)+o|0)|~n))+t[5]-57434055|0)<<21|r>>>11)+i|0,r=((r+=((o=((o+=(r^((n=((n+=(i^(r|~o))+t[12]+1700485571|0)<<6|n>>>26)+r|0)|~i))+t[3]-1894986606|0)<<10|o>>>22)+n|0)^((i=((i+=(n^(o|~r))+t[10]-1051523|0)<<15|i>>>17)+o|0)|~n))+t[1]-2054922799|0)<<21|r>>>11)+i|0,r=((r+=((o=((o+=(r^((n=((n+=(i^(r|~o))+t[8]+1873313359|0)<<6|n>>>26)+r|0)|~i))+t[15]-30611744|0)<<10|o>>>22)+n|0)^((i=((i+=(n^(o|~r))+t[6]-1560198380|0)<<15|i>>>17)+o|0)|~n))+t[13]+1309151649|0)<<21|r>>>11)+i|0,r=((r+=((o=((o+=(r^((n=((n+=(i^(r|~o))+t[4]-145523070|0)<<6|n>>>26)+r|0)|~i))+t[11]-1120210379|0)<<10|o>>>22)+n|0)^((i=((i+=(n^(o|~r))+t[2]+718787259|0)<<15|i>>>17)+o|0)|~n))+t[9]-343485551|0)<<21|r>>>11)+i|0,e[0]=n+e[0]|0,e[1]=r+e[1]|0,e[2]=i+e[2]|0,e[3]=o+e[3]|0}function n(e){var t,n=[];for(t=0;t<64;t+=4)n[t>>2]=e.charCodeAt(t)+(e.charCodeAt(t+1)<<8)+(e.charCodeAt(t+2)<<16)+(e.charCodeAt(t+3)<<24);return n}function r(e){var t,n=[];for(t=0;t<64;t+=4)n[t>>2]=e[t]+(e[t+1]<<8)+(e[t+2]<<16)+(e[t+3]<<24);return n}function i(e){var r,i,o,u,s,a,c=e.length,f=[1732584193,-271733879,-1732584194,271733878];for(r=64;r<=c;r+=64)t(f,n(e.substring(r-64,r)));for(i=(e=e.substring(r-64)).length,o=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],r=0;r<i;r+=1)o[r>>2]|=e.charCodeAt(r)<<(r%4<<3);if(o[r>>2]|=128<<(r%4<<3),r>55)for(t(f,o),r=0;r<16;r+=1)o[r]=0;return 
u=8*c,u=u.toString(16).match(/(.*?)(.{0,8})$/),s=parseInt(u[2],16),a=parseInt(u[1],16)||0,o[14]=s,o[15]=a,t(f,o),f}function o(e){var t,n="";for(t=0;t<4;t+=1)n+=f[e>>8*t+4&15]+f[e>>8*t&15];return n}function u(e){var t;for(t=0;t<e.length;t+=1)e[t]=o(e[t]);return e.join("")}function s(e){return/[\u0080-\uFFFF]/.test(e)&&(e=unescape(encodeURIComponent(e))),e}function a(e){var t,n=[],r=e.length;for(t=0;t<r-1;t+=2)n.push(parseInt(e.substr(t,2),16));return String.fromCharCode.apply(String,n)}function c(){this.reset()}var f=["0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f"];return"5d41402abc4b2a76b9719d911017c592"!==u(i("hello"))&&function(e,t){var n=(65535&e)+(65535&t);return(e>>16)+(t>>16)+(n>>16)<<16|65535&n},"undefined"==typeof ArrayBuffer||ArrayBuffer.prototype.slice||function(){function t(e,t){return(e=0|e||0)<0?Math.max(e+t,0):Math.min(e,t)}ArrayBuffer.prototype.slice=function(n,r){var i,o,u,s,a=this.byteLength,c=t(n,a),f=a;return r!==e&&(f=t(r,a)),c>f?new ArrayBuffer(0):(i=f-c,o=new ArrayBuffer(i),u=new Uint8Array(o),s=new Uint8Array(this,c,i),u.set(s),o)}}(),c.prototype.append=function(e){return this.appendBinary(s(e)),this},c.prototype.appendBinary=function(e){this._buff+=e,this._length+=e.length;var r,i=this._buff.length;for(r=64;r<=i;r+=64)t(this._hash,n(this._buff.substring(r-64,r)));return this._buff=this._buff.substring(r-64),this},c.prototype.end=function(e){var t,n,r=this._buff,i=r.length,o=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0];for(t=0;t<i;t+=1)o[t>>2]|=r.charCodeAt(t)<<(t%4<<3);return this._finish(o,i),n=u(this._hash),e&&(n=a(n)),this.reset(),n},c.prototype.reset=function(){return this._buff="",this._length=0,this._hash=[1732584193,-271733879,-1732584194,271733878],this},c.prototype.getState=function(){return{buff:this._buff,length:this._length,hash:this._hash}},c.prototype.setState=function(e){return this._buff=e.buff,this._length=e.length,this._hash=e.hash,this},c.prototype.destroy=function(){delete this._hash,delete this._buff,delete 
this._length},c.prototype._finish=function(e,n){var r,i,o,u=n;if(e[u>>2]|=128<<(u%4<<3),u>55)for(t(this._hash,e),u=0;u<16;u+=1)e[u]=0;r=(r=8*this._length).toString(16).match(/(.*?)(.{0,8})$/),i=parseInt(r[2],16),o=parseInt(r[1],16)||0,e[14]=i,e[15]=o,t(this._hash,e)},c.hash=function(e,t){return c.hashBinary(s(e),t)},c.hashBinary=function(e,t){var n=u(i(e));return t?a(n):n},c.ArrayBuffer=function(){this.reset()},c.ArrayBuffer.prototype.append=function(e){var n,i=function(e,t,n){var r=new Uint8Array(e.byteLength+t.byteLength);return r.set(new Uint8Array(e)),r.set(new Uint8Array(t),e.byteLength),n?r:r.buffer}(this._buff.buffer,e,!0),o=i.length;for(this._length+=e.byteLength,n=64;n<=o;n+=64)t(this._hash,r(i.subarray(n-64,n)));return this._buff=n-64<o?new Uint8Array(i.buffer.slice(n-64)):new Uint8Array(0),this},c.ArrayBuffer.prototype.end=function(e){var t,n,r=this._buff,i=r.length,o=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0];for(t=0;t<i;t+=1)o[t>>2]|=r[t]<<(t%4<<3);return this._finish(o,i),n=u(this._hash),e&&(n=a(n)),this.reset(),n},c.ArrayBuffer.prototype.reset=function(){return this._buff=new Uint8Array(0),this._length=0,this._hash=[1732584193,-271733879,-1732584194,271733878],this},c.ArrayBuffer.prototype.getState=function(){var e=c.prototype.getState.call(this);return e.buff=function(e){return String.fromCharCode.apply(null,new Uint8Array(e))}(e.buff),e},c.ArrayBuffer.prototype.setState=function(e){return e.buff=function(e,t){var n,r=e.length,i=new ArrayBuffer(r),o=new Uint8Array(i);for(n=0;n<r;n+=1)o[n]=e.charCodeAt(n);return t?o:i}(e.buff,!0),c.prototype.setState.call(this,e)},c.ArrayBuffer.prototype.destroy=c.prototype.destroy,c.ArrayBuffer.prototype._finish=c.prototype._finish,c.ArrayBuffer.hash=function(e,n){var i=u(function(e){var n,i,o,u,s,a,c=e.length,f=[1732584193,-271733879,-1732584194,271733878];for(n=64;n<=c;n+=64)t(f,r(e.subarray(n-64,n)));for(i=(e=n-64<c?e.subarray(n-64):new 
Uint8Array(0)).length,o=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],n=0;n<i;n+=1)o[n>>2]|=e[n]<<(n%4<<3);if(o[n>>2]|=128<<(n%4<<3),n>55)for(t(f,o),n=0;n<16;n+=1)o[n]=0;return u=8*c,u=u.toString(16).match(/(.*?)(.{0,8})$/),s=parseInt(u[2],16),a=parseInt(u[1],16)||0,o[14]=s,o[15]=a,t(f,o),f}(new Uint8Array(e)));return n?a(i):i},c})},{}],8:[function(e,t,n){var r=e(11),i=e(12),o=i;o.v1=r,o.v4=i,t.exports=o},{11:11,12:12}],9:[function(e,t,n){for(var r=[],i=0;i<256;++i)r[i]=(i+256).toString(16).substr(1);t.exports=function(e,t){var n=t||0,i=r;return i[e[n++]]+i[e[n++]]+i[e[n++]]+i[e[n++]]+"-"+i[e[n++]]+i[e[n++]]+"-"+i[e[n++]]+i[e[n++]]+"-"+i[e[n++]]+i[e[n++]]+"-"+i[e[n++]]+i[e[n++]]+i[e[n++]]+i[e[n++]]+i[e[n++]]+i[e[n++]]}},{}],10:[function(e,t,n){(function(e){var n,r=e.crypto||e.msCrypto;if(r&&r.getRandomValues){var i=new Uint8Array(16);n=function(){return r.getRandomValues(i),i}}if(!n){var o=new Array(16);n=function(){for(var e,t=0;t<16;t++)0==(3&t)&&(e=4294967296*Math.random()),o[t]=e>>>((3&t)<<3)&255;return o}}t.exports=n}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],11:[function(e,t,n){var r=e(10),i=e(9),o=r(),u=[1|o[0],o[1],o[2],o[3],o[4],o[5]],s=16383&(o[6]<<8|o[7]),a=0,c=0;t.exports=function(e,t,n){var r=t&&n||0,o=t||[],f=void 0!==(e=e||{}).clockseq?e.clockseq:s,l=void 0!==e.msecs?e.msecs:(new Date).getTime(),d=void 0!==e.nsecs?e.nsecs:c+1,h=l-a+(d-c)/1e4;if(h<0&&void 0===e.clockseq&&(f=f+1&16383),(h<0||l>a)&&void 0===e.nsecs&&(d=0),d>=1e4)throw new Error("uuid.v1(): Can't create more than 10M uuids/sec");a=l,c=d,s=f;var p=(1e4*(268435455&(l+=122192928e5))+d)%4294967296;o[r++]=p>>>24&255,o[r++]=p>>>16&255,o[r++]=p>>>8&255,o[r++]=255&p;var v=l/4294967296*1e4&268435455;o[r++]=v>>>8&255,o[r++]=255&v,o[r++]=v>>>24&15|16,o[r++]=v>>>16&255,o[r++]=f>>>8|128,o[r++]=255&f;for(var y=e.node||u,g=0;g<6;++g)o[r+g]=y[g];return t||i(o)}},{10:10,9:9}],12:[function(e,t,n){var 
r=e(10),i=e(9);t.exports=function(e,t,n){var o=t&&n||0;"string"==typeof e&&(t="binary"==e?new Array(16):null,e=null);var u=(e=e||{}).random||(e.rng||r)();if(u[6]=15&u[6]|64,u[8]=63&u[8]|128,t)for(var s=0;s<16;++s)t[o+s]=u[s];return t||i(u)}},{10:10,9:9}],13:[function(e,t,n){(function(t,n){"use strict";function r(e){return e&&"object"==typeof e&&"default"in e?e.default:e}function i(e){if(e instanceof ArrayBuffer)return function(e){if("function"==typeof e.slice)return e.slice(0);var t=new ArrayBuffer(e.byteLength),n=new Uint8Array(t),r=new Uint8Array(e);return n.set(r),t}(e);var t=e.size,n=e.type;return"function"==typeof e.slice?e.slice(0,t,n):e.webkitSlice(0,t,n)}function o(e){var t,n,r;if(!e||"object"!=typeof e)return e;if(Array.isArray(e)){for(t=[],n=0,r=e.length;n<r;n++)t[n]=o(e[n]);return t}if(e instanceof Date)return e.toISOString();if(function(e){return"undefined"!=typeof ArrayBuffer&&e instanceof ArrayBuffer||"undefined"!=typeof Blob&&e instanceof Blob}(e))return i(e);if(!function(e){var t=Object.getPrototypeOf(e);if(null===t)return!0;var n=t.constructor;return"function"==typeof n&&n instanceof n&&Ce.call(n)==De}(e))return e;t={};for(n in e)if(Object.prototype.hasOwnProperty.call(e,n)){var u=o(e[n]);void 0!==u&&(t[n]=u)}return t}function u(e){return qe(function(t){var n=this,r="function"==typeof(t=o(t))[t.length-1]&&t.pop(),i=new Me(function(r,i){var o;try{var u=function(e){var t=!1;return qe(function(n){if(t)throw new Error("once called more than once");t=!0,e.apply(this,n)})}(function(e,t){e?i(e):r(t)});t.push(u),(o=e.apply(n,t))&&"function"==typeof o.then&&r(o)}catch(e){i(e)}});return r&&i.then(function(e){r(null,e)},r),i})}function s(e){return"$"+e}function a(e){return e.substring(1)}function c(){this._store={}}function f(e){if(this._store=new c,e&&Array.isArray(e))for(var t=0,n=e.length;t<n;t++)this.add(e[t])}function l(){return"undefined"!=typeof chrome&&void 0!==chrome.storage&&void 0!==chrome.storage.local}function d(){return Fe}function 
h(){Se.EventEmitter.call(this),this._listeners={},function(e){l()?chrome.storage.onChanged.addListener(function(t){null!=t.db_name&&e.emit(t.dbName.newValue)}):d()&&("undefined"!=typeof addEventListener?addEventListener("storage",function(t){e.emit(t.key)}):window.attachEvent("storage",function(t){e.emit(t.key)}))}(this)}function p(e){if("undefined"!=typeof console&&"function"==typeof console[e]){var t=Array.prototype.slice.call(arguments,1);console[e].apply(console,t)}}function v(e,t,n){Error.call(this,n),this.status=e,this.name=t,this.message=n,this.error=!0}function y(e){for(var t=[],n=0,r=e.length;n<r;n++)t=t.concat(e[n]);return t}function g(e){return"boolean"==typeof e._remote?e._remote:"function"==typeof e.type&&(p("warn","db.type() is deprecated and will be removed in a future version of PouchDB"),"http"===e.type())}function m(e,t,n){return new Me(function(r,i){e.get(t,function(o,u){if(o){if(404!==o.status)return i(o);u={}}var s=u._rev,a=n(u);if(!a)return r({updated:!1,rev:s});a._id=t,a._rev=s,r(function(e,t,n){return e.put(t).then(function(e){return{updated:!0,rev:e.rev}},function(r){if(409!==r.status)throw r;return m(e,t._id,n)})}(e,a,n))})})}function _(e){return(e=o(e)).index||(e.index={}),["type","name","ddoc"].forEach(function(t){e.index[t]&&(e[t]=e.index[t],delete e.index[t])}),e.fields&&(e.index.fields=e.fields,delete e.fields),e.type||(e.type="json"),e}function w(e,t,n){t=_(t),e.request({method:"POST",url:"_index",body:t},n)}function b(e,t,n){e.request({method:"POST",url:"_find",body:t},n)}function k(e,t,n){e.request({method:"POST",url:"_explain",body:t},n)}function $(e,t){e.request({method:"GET",url:"_index"},t)}function x(e,t,n){var r=t.ddoc,i=t.type||"json",o=t.name;if(!r)return n(new Error("you must provide an index's ddoc"));if(!o)return n(new Error("you must provide an index's name"));var u="_index/"+[r,i,o].map(encodeURIComponent).join("/");e.request({method:"DELETE",url:u},n)}function j(e,t){for(var 
n=e,r=0,i=t.length;r<i;r++){if(!(n=n[t[r]]))break}return n}function E(e,t,n){for(var r=0,i=t.length;r<i-1;r++){e=e[t[r]]={}}e[t[i-1]]=n}function O(e,t){return e<t?-1:e>t?1:0}function A(e){for(var t=[],n="",r=0,i=e.length;r<i;r++){var o=e[r];"."===o?r>0&&"\\"===e[r-1]?n=n.substring(0,n.length-1)+".":(t.push(n),n=""):n+=o}return t.push(n),t}function q(e){return Pe.indexOf(e)>-1}function S(e){return Object.keys(e)[0]}function L(e){return e[S(e)]}function B(e){var t={};return e.forEach(function(e){Object.keys(e).forEach(function(n){var r=e[n];if("object"!=typeof r&&(r={$eq:r}),q(n))r instanceof Array?t[n]=r.map(function(e){return B([e])}):t[n]=B([r]);else{var i=t[n]=t[n]||{};Object.keys(r).forEach(function(e){var t=r[e];return"$gt"===e||"$gte"===e?function(e,t,n){if(void 0!==n.$eq)return;void 0!==n.$gte?"$gte"===e?t>n.$gte&&(n.$gte=t):t>=n.$gte&&(delete n.$gte,n.$gt=t):void 0!==n.$gt?"$gte"===e?t>n.$gt&&(delete n.$gt,n.$gte=t):t>n.$gt&&(n.$gt=t):n[e]=t}(e,t,i):"$lt"===e||"$lte"===e?function(e,t,n){if(void 0!==n.$eq)return;void 0!==n.$lte?"$lte"===e?t<n.$lte&&(n.$lte=t):t<=n.$lte&&(delete n.$lte,n.$lt=t):void 0!==n.$lt?"$lte"===e?t<n.$lt&&(delete n.$lt,n.$lte=t):t<n.$lt&&(n.$lt=t):n[e]=t}(e,t,i):"$ne"===e?function(e,t){"$ne"in t?t.$ne.push(e):t.$ne=[e]}(t,i):"$eq"===e?function(e,t){delete t.$gt,delete t.$gte,delete t.$lt,delete t.$lte,delete t.$ne,t.$eq=e}(t,i):void(i[e]=t)})}})}),t}function T(e,t){if(e===t)return 0;e=M(e),t=M(t);var n=F(e),r=F(t);if(n-r!=0)return n-r;switch(typeof e){case"number":return e-t;case"boolean":return e<t?-1:1;case"string":return function(e,t){return e===t?0:e>t?1:-1}(e,t)}return Array.isArray(e)?function(e,t){for(var n=Math.min(e.length,t.length),r=0;r<n;r++){var i=T(e[r],t[r]);if(0!==i)return i}return e.length===t.length?0:e.length>t.length?1:-1}(e,t):function(e,t){for(var n=Object.keys(e),r=Object.keys(t),i=Math.min(n.length,r.length),o=0;o<i;o++){var u=T(n[o],r[o]);if(0!==u)return u;if(0!==(u=T(e[n[o]],t[r[o]])))return u}return 
n.length===r.length?0:n.length>r.length?1:-1}(e,t)}function M(e){switch(typeof e){case"undefined":return null;case"number":return e===1/0||e===-1/0||isNaN(e)?null:e;case"object":var t=e;if(Array.isArray(e)){var n=e.length;e=new Array(n);for(var r=0;r<n;r++)e[r]=M(t[r])}else{if(e instanceof Date)return e.toJSON();if(null!==e){e={};for(var i in t)if(t.hasOwnProperty(i)){var o=t[i];void 0!==o&&(e[i]=M(o))}}}}return e}function C(e){if(null!==e)switch(typeof e){case"boolean":return e?1:0;case"number":return function(e){if(0===e)return"1";var t=e.toExponential().split(/e\+?/),n=parseInt(t[1],10),r=e<0,i=r?"0":"2",o=function(e,t,n){return function(e,t,n){for(var r="",i=n-e.length;r.length<i;)r+=t;return r}(e,t,n)+e}(((r?-n:n)-Re).toString(),"0",ze);i+=Ve+o;var u=Math.abs(parseFloat(t[0]));r&&(u=10-u);var s=u.toFixed(20);return s=s.replace(/\.?0+$/,""),i+=Ve+s}(e);case"string":return e.replace(/\u0002/g,"").replace(/\u0001/g,"").replace(/\u0000/g,"");case"object":var t=Array.isArray(e),n=t?e:Object.keys(e),r=-1,i=n.length,o="";if(t)for(;++r<i;)o+=D(n[r]);else for(;++r<i;){var u=n[r];o+=D(u)+D(e[u])}return o}return""}function D(e){return e=M(e),F(e)+Ve+C(e)+"\0"}function I(e,t){var n,r=t;if("1"===e[t])n=0,t++;else{var i="0"===e[t];t++;var o="",u=e.substring(t,t+ze),s=parseInt(u,10)+Re;for(i&&(s=-s),t+=ze;;){var a=e[t];if("\0"===a)break;o+=a,t++}n=1===(o=o.split(".")).length?parseInt(o,10):parseFloat(o[0]+"."+o[1]),i&&(n-=10),0!==s&&(n=parseFloat(n+"e"+s))}return{num:n,length:t-r}}function U(e,t){var n=e.pop();if(t.length){var r=t[t.length-1];n===r.element&&(t.pop(),r=t[t.length-1]);var i=r.element,o=r.index;if(Array.isArray(i))i.push(n);else if(o===e.length-2){i[e.pop()]=n}else e.push(n)}}function F(e){var t=["boolean","number","string","object"].indexOf(typeof e);return~t?null===e?1:Array.isArray(e)?5:t<3?t+2:t+3:Array.isArray(e)?5:void 0}function N(e,t,n){if(e=e.filter(function(e){return P(e.doc,t.selector,n)}),t.sort){var r=function(e){function t(t){return 
e.map(function(e){var n=A(S(e));return j(t,n)})}return function(e,n){var r=T(t(e.doc),t(n.doc));return 0!==r?r:O(e.doc._id,n.doc._id)}}(t.sort);e=e.sort(r),"string"!=typeof t.sort[0]&&"desc"===L(t.sort[0])&&(e=e.reverse())}if("limit"in t||"skip"in t){var i=t.skip||0,o=("limit"in t?t.limit:e.length)+i;e=e.slice(i,o)}return e}function P(e,t,n){return n.every(function(n){var r=t[n],i=A(n),o=j(e,i);return q(n)?function(e,t,n){if("$or"===e)return t.some(function(e){return P(n,e,Object.keys(e))});if("$not"===e)return!P(n,t,Object.keys(t));return!t.find(function(e){return P(n,e,Object.keys(e))})}(n,r,e):R(r,e,i,o)})}function R(e,t,n,r){return!e||Object.keys(e).every(function(i){var o=e[i];return function(e,t,n,r,i){if(!Je[e])throw new Error('unknown operator "'+e+'" - should be one of $eq, $lte, $lt, $gt, $gte, $exists, $ne, $in, $nin, $size, $mod, $regex, $elemMatch, $type, $allMatch or $all');return Je[e](t,n,r,i)}(i,t,o,n,r)})}function z(e){return void 0!==e&&null!==e}function V(e){return void 0!==e}function J(e,t){return t.some(function(t){return e instanceof Array?e.indexOf(t)>-1:e===t})}function K(e){return function(){for(var t=arguments.length,n=new Array(t),r=-1;++r<t;)n[r]=arguments[r];return e.call(this,n)}}function W(e){return K(function(n){var r=n.pop(),i=e.apply(this,n);return function(e,n){e.then(function(e){t.nextTick(function(){n(null,e)})},function(e){t.nextTick(function(){n(e)})})}(i,r),i})}function X(e){for(var t={},n=0,r=e.length;n<r;n++)t=Ne(t,e[n]);return t}function G(e,t){for(var n=0,r=Math.min(e.length,t.length);n<r;n++)if(e[n]!==t[n])return!1;return!0}function Y(e,t){if(e.length!==t.length)return!1;for(var n=0,r=e.length;n<r;n++)if(e[n]!==t[n])return!1;return!0}function H(e,t){return function(e,t){e=e||[],t=t||{};try{return new Blob(e,t)}catch(i){if("TypeError"!==i.name)throw i;for(var n=new("undefined"!=typeof BlobBuilder?BlobBuilder:"undefined"!=typeof MSBlobBuilder?MSBlobBuilder:"undefined"!=typeof 
MozBlobBuilder?MozBlobBuilder:WebKitBlobBuilder),r=0;r<e.length;r+=1)n.append(e[r]);return n.getBlob(t.type)}}([function(e){for(var t=e.length,n=new ArrayBuffer(t),r=new Uint8Array(n),i=0;i<t;i++)r[i]=e.charCodeAt(i);return n}(e)],{type:t})}function Q(){this.promise=new Me(function(e){e()})}function Z(e){return Te.hash(e)}function ee(e){if(!e)return"undefined";switch(typeof e){case"function":case"string":return e.toString();default:return JSON.stringify(e)}}function te(e,t,n,r,i,o){var u,s=function(e,t){return ee(e)+ee(t)+"undefined"}(n,r);if(!i&&(u=e._cachedViews=e._cachedViews||{})[s])return u[s];var a=e.info().then(function(a){var c=a.db_name+"-mrview-"+(i?"temp":Z(s));return m(e,"_local/"+o,function(e){e.views=e.views||{};var n=t;-1===n.indexOf("/")&&(n=t+"/"+t);var r=e.views[n]=e.views[n]||{};if(!r[c])return r[c]=!0,e}).then(function(){return e.registerDependentDatabase(c).then(function(t){var i=t.db;i.auto_compaction=!0;var o={name:c,db:i,sourceDB:e,adapter:e.adapter,mapFun:n,reduceFun:r};return o.db.get("_local/lastSeq").catch(function(e){if(404!==e.status)throw e}).then(function(e){return o.seq=e?e.seq:0,u&&o.db.once("destroyed",function(){delete u[s]}),o})})})});return u&&(u[s]=a),a}function ne(e){this.status=400,this.name="query_parse_error",this.message=e,this.error=!0;try{Error.captureStackTrace(this,ne)}catch(e){}}function re(e){this.status=404,this.name="not_found",this.message=e,this.error=!0;try{Error.captureStackTrace(this,re)}catch(e){}}function ie(e){this.status=500,this.name="invalid_value",this.message=e,this.error=!0;try{Error.captureStackTrace(this,ie)}catch(e){}}function oe(e,t){return t&&e.then(function(e){Be(function(){t(null,e)})},function(e){Be(function(){t(e)})}),e}function ue(e,t){return function(){var n=arguments,r=this;return e.add(function(){return t.apply(r,n)})}}function se(e){var t=new Ie(e),n=new Array(t.size),r=-1;return t.forEach(function(e){n[++r]=e}),n}function ae(e){var t=new Array(e.size),n=-1;return 
e.forEach(function(e,r){t[++n]=r}),t}function ce(e){return-1===e.indexOf("/")?[e,e]:e.split("/")}function fe(e,t){try{e.emit("error",t)}catch(e){p("error","The user's map/reduce function threw an uncaught error.\nYou can debug this error by doing:\nmyDatabase.on('error', function (err) { debugger; });\nPlease double-check your map/reduce function."),p("error",t)}}function le(e,t){var n=function(e){for(var t=0,n=e.length;t<n;t++)if(-1!==e[t].indexOf("."))return!1;return!0}(e),r=1===e.length;return n?r?function(e,t){return function(n){t(n[e])}}(e[0],t):function(e,t){return function(n){for(var r=[],i=0,o=e.length;i<o;i++)r.push(n[e[i]]);t(r)}}(e,t):r?function(e,t){var n=A(e);return function(e){for(var r=e,i=0,o=n.length;i<o;i++)if(!(r=r[n[i]]))return;t(r)}}(e[0],t):function(e,t){return function(n){for(var r=[],i=0,o=e.length;i<o;i++){for(var u=A(e[i]),s=n,a=0,c=u.length;a<c;a++)if(!(s=s[u[a]]))return;r.push(s)}t(r)}}(e,t)}function de(e){return e.fields=e.fields.map(function(e){if("string"==typeof e){var t={};return t[e]="asc",t}return e}),e}function he(e,t){for(var n=[],r=0;r<t.def.fields.length;r++){var i=S(t.def.fields[r]);n.push(e[i])}return n}function pe(e){return e.allDocs({startkey:"_design/",endkey:"_design/",include_docs:!0}).then(function(e){var t={indexes:[{ddoc:null,name:"_all_docs",type:"special",def:{fields:[{_id:"asc"}]}}]};return t.indexes=Ke(t.indexes,e.rows.filter(function(e){return"query"===e.doc.language}).map(function(e){return(void 0!==e.doc.views?Object.keys(e.doc.views):[]).map(function(t){var n=e.doc.views[t];return{ddoc:e.id,name:t,type:"json",def:de(n.options.def)}})})),t.indexes.sort(function(e,t){return O(e.name,t.name)}),t.total_rows=t.indexes.length,t})}function ve(e,t){for(var n=e.def.fields.map(S),r=0,i=n.length;r<i;r++){if(t===n[r])return!0}return!1}function ye(e,t){return"$eq"!==S(e[t])}function ge(e,t){var n=t.def.fields.map(S);return e.slice().sort(function(e,t){var 
r=n.indexOf(e),i=n.indexOf(t);return-1===r&&(r=Number.MAX_VALUE),-1===i&&(i=Number.MAX_VALUE),O(r,i)})}function me(e,t,n,r){return ge(function(e){for(var t={},n=0;n<e.length;n++)t["$"+e[n]]=!0;return Object.keys(t).map(function(e){return e.substring(1)})}(Ke(e,function(e,t,n){for(var r=!1,i=0,o=(n=ge(n,e)).length;i<o;i++){var u=n[i];if(r||!ve(e,u))return n.slice(i);i<o-1&&ye(t,u)&&(r=!0)}return[]}(t,n,r),function(e){var t=[];return Object.keys(e).forEach(function(n){var r=e[n];Object.keys(r).forEach(function(e){"$ne"===e&&t.push(n)})}),t}(n))),t)}function _e(e,t,n){if(t){var r=function(e,t){return!(e.length>t.length)&&G(e,t)}(t,e),i=G(n,e);return r&&i}return function(e,t){e=e.slice();for(var n=0,r=t.length;n<r;n++){var i=t[n];if(!e.length)break;var o=e.indexOf(i);if(-1===o)return!1;e.splice(o,1)}return!0}(n,e)}function we(e){return-1===et.indexOf(e)}function be(e,t,n,r){var i=e.def.fields.map(S);return!!_e(i,t,n)&&function(e,t){var n=t[e[0]];return void 0===n||!!Object.keys(n).some(function(e){return!we(e)})&&!(1===Object.keys(n).length&&"$ne"===S(n))}(i,r)}function ke(e,t,n,r,i){var o=function(e,t,n,r){return r.reduce(function(r,i){return be(i,n,t,e)&&r.push(i),r},[])}(e,t,n,r);if(0===o.length){if(i)throw{error:"no_usable_index",message:"There is no index available for this selector."};var u=r[0];return u.defaultUsed=!0,u}if(1===o.length&&!i)return o[0];var s=function(e){for(var t={},n=0,r=e.length;n<r;n++)t[e[n]]=!0;return t}(t);if(i){var a="_design/"+i[0],c=2===i.length&&i[1],f=o.find(function(e){return!(!c||e.ddoc!==a||c!==e.name)||e.ddoc===a});if(!f)throw{error:"unknown_error",message:"Could not find that index or could not use that index for the query"};return f}return function(e,t){for(var n=null,r=-1,i=0,o=e.length;i<o;i++){var u=e[i],s=t(u);s>r&&(r=s,n=u)}return n}(o,function(e){for(var t=e.def.fields.map(S),n=0,r=0,i=t.length;r<i;r++){var o=t[r];s[o]&&n++}return n})}function 
$e(e,t){switch(e){case"$eq":return{startkey:t,endkey:t};case"$lte":return{endkey:t};case"$gte":return{startkey:t};case"$lt":return{endkey:t,inclusive_end:!1};case"$gt":return{startkey:t,inclusive_start:!1}}}function xe(e,t){return t.defaultUsed?function(e){return{queryOpts:{startkey:null},inMemoryFields:[Object.keys(e)]}}(e):1===t.def.fields.length?function(e,t){var n,r=S(t.def.fields[0]),i=e[r]||{},o=[];return Object.keys(i).forEach(function(e){if(we(e))o.push(r);else{var t=function(e,t){switch(e){case"$eq":return{key:t};case"$lte":return{endkey:t};case"$gte":return{startkey:t};case"$lt":return{endkey:t,inclusive_end:!1};case"$gt":return{startkey:t,inclusive_start:!1}}}(e,i[e]);n=n?X([n,t]):t}}),{queryOpts:n,inMemoryFields:o}}(e,t):function(e,t){function n(e){!1!==r&&s.push(Qe),!1!==i&&a.push(Ze),u=o.slice(e)}for(var r,i,o=t.def.fields.map(S),u=[],s=[],a=[],c=0,f=o.length;c<f;c++){var l=e[o[c]];if(!l||!Object.keys(l).length){n(c);break}if(c>0){if(Object.keys(l).some(we)){n(c);break}var d="$gt"in l||"$gte"in l||"$lt"in l||"$lte"in l,h=Object.keys(e[o[c-1]]),p=Y(h,["$eq"]),v=Y(h,Object.keys(l));if(d&&!p&&!v){n(c);break}}for(var y=Object.keys(l),g=null,m=0;m<y.length;m++){var _=y[m],w=$e(_,l[_]);g=g?X([g,w]):w}s.push("startkey"in g?g.startkey:Qe),a.push("endkey"in g?g.endkey:Ze),"inclusive_start"in g&&(r=g.inclusive_start),"inclusive_end"in g&&(i=g.inclusive_end)}var b={startkey:s,endkey:a};return void 0!==r&&(b.inclusive_start=r),void 0!==i&&(b.inclusive_end=i),{queryOpts:b,inMemoryFields:u}}(e,t)}function je(e,t){var n=e.selector,r=function(e,t){var n,r=Object.keys(e),i=t?t.map(S):[];return n=r.length>=i.length?r:i,0===i.length?{fields:n}:(n=n.sort(function(e,t){var n=i.indexOf(e);-1===n&&(n=Number.MAX_VALUE);var r=i.indexOf(t);return-1===r&&(r=Number.MAX_VALUE),n<r?-1:n>r?1:0}),{fields:n,sortOrder:t.map(S)})}(n,e.sort),i=r.fields,o=ke(n,i,r.sortOrder,t,e.use_index),u=xe(n,o);return{queryOpts:u.queryOpts,index:o,inMemoryFields:me(u.inMemoryFields,o,n,i)}}function 
Ee(e,t,n){return t.selector&&(t.selector=function(e){var t=o(e),n=!1;"$and"in t&&(t=B(t.$and),n=!0),["$or","$nor"].forEach(function(e){e in t&&t[e].forEach(function(e){for(var t=Object.keys(e),n=0;n<t.length;n++){var r=t[n],i=e[r];"object"==typeof i&&null!==i||(e[r]={$eq:i})}})}),"$not"in t&&(t.$not=B([t.$not]));for(var r=Object.keys(t),i=0;i<r.length;i++){var u=r[i],s=t[u];"object"!=typeof s||null===s?s={$eq:s}:"$ne"in s&&!n&&(s.$ne=[s.$ne]),t[u]=s}return t}(t.selector)),t.sort&&(t.sort=function(e){if(!Array.isArray(e))throw new Error("invalid sort json - should be an array");return e.map(function(e){if("string"==typeof e){var t={};return t[e]="asc",t}return e})}(t.sort)),t.use_index&&(t.use_index=function(e){var t=[];return"string"==typeof e?t.push(e):t=e,t.map(function(e){return e.replace("_design/","")})}(t.use_index)),function(e){if("object"!=typeof e.selector)throw new Error("you must provide a selector when you find()")}(t),pe(e).then(function(r){e.constructor.emit("debug",["find","planning query",t]);var i=je(t,r.indexes);e.constructor.emit("debug",["find","query plan",i]);var u=i.index;!function(e,t){if(t.defaultUsed&&e.sort){var n=e.sort.filter(function(e){return"_id"!==Object.keys(e)[0]}).map(function(e){return Object.keys(e)[0]});if(n.length>0)throw new Error('Cannot sort on field(s) "'+n.join(",")+'" when using the default index')}t.defaultUsed}(t,u);var s=Ne({include_docs:!0,reduce:!1},i.queryOpts);if("startkey"in s&&"endkey"in s&&T(s.startkey,s.endkey)>0)return{docs:[]};return t.sort&&"string"!=typeof t.sort[0]&&"desc"===L(t.sort[0])&&(s.descending=!0,s=function(e){var t=o(e);return delete t.startkey,delete t.endkey,delete t.inclusive_start,delete t.inclusive_end,"endkey"in e&&(t.startkey=e.endkey),"startkey"in e&&(t.endkey=e.startkey),"inclusive_start"in e&&(t.inclusive_end=e.inclusive_start),"inclusive_end"in e&&(t.inclusive_start=e.inclusive_end),t}(s)),i.inMemoryFields.length||("limit"in t&&(s.limit=t.limit),"skip"in 
t&&(s.skip=t.skip)),n?Me.resolve(i,s):Me.resolve().then(function(){if("_all_docs"===u.name)return function(e,t){var n=o(t);return n.descending?("endkey"in n&&"string"!=typeof n.endkey&&(n.endkey=""),"startkey"in n&&"string"!=typeof n.startkey&&(n.limit=0)):("startkey"in n&&"string"!=typeof n.startkey&&(n.startkey=""),"endkey"in n&&"string"!=typeof n.endkey&&(n.limit=0)),"key"in n&&"string"!=typeof n.key&&(n.limit=0),e.allDocs(n).then(function(e){return e.rows=e.rows.filter(function(e){return!/^_design\//.test(e.id)}),e})}(e,s);var t=function(e){return e.ddoc.substring(8)+"/"+e.name}(u);return He.query.call(e,t,s)}).then(function(e){!1===s.inclusive_start&&(e.rows=function(e,t,n){for(var r=n.def.fields,i=0,o=e.length;i<o;i++){var u=he(e[i].doc,n);if(1===r.length)u=u[0];else for(;u.length>t.length;)u.pop();if(Math.abs(T(u,t))>0)break}return i>0?e.slice(i):e}(e.rows,s.startkey,u)),i.inMemoryFields.length&&(e.rows=N(e.rows,t,i.inMemoryFields));var n={docs:e.rows.map(function(e){var n=e.doc;return t.fields?function(e,t){for(var n={},r=0,i=t.length;r<i;r++){var o=A(t[r]),u=j(e,o);void 0!==u&&E(n,o,u)}return n}(n,t.fields):n})};return u.defaultUsed&&(n.warning="no matching index found, create an index to optimize query time"),n})})}var Oe=r(e(8)),Ae=r(e(5)),qe=r(e(1)),Se=e(2),Le=r(e(4)),Be=r(e(3)),Te=r(e(7)),Me="function"==typeof Promise?Promise:Ae,Ce=Function.prototype.toString,De=Ce.call(Object);c.prototype.get=function(e){var t=s(e);return this._store[t]},c.prototype.set=function(e,t){var n=s(e);return this._store[n]=t,!0},c.prototype.has=function(e){return s(e)in this._store},c.prototype.delete=function(e){var t=s(e),n=t in this._store;return delete this._store[t],n},c.prototype.forEach=function(e){for(var t=Object.keys(this._store),n=0,r=t.length;n<r;n++){var i=t[n];e(this._store[i],i=a(i))}},Object.defineProperty(c.prototype,"size",{get:function(){return Object.keys(this._store).length}}),f.prototype.add=function(e){return 
this._store.set(e,!0)},f.prototype.has=function(e){return this._store.has(e)},f.prototype.forEach=function(e){this._store.forEach(function(t,n){e(n)})},Object.defineProperty(f.prototype,"size",{get:function(){return this._store.size}});var Ie,Ue;!function(){if("undefined"==typeof Symbol||"undefined"==typeof Map||"undefined"==typeof Set)return!1;var e=Object.getOwnPropertyDescriptor(Map,Symbol.species);return e&&"get"in e&&Map[Symbol.species]===Map}()?(Ie=f,Ue=c):(Ie=Set,Ue=Map);var Fe;if(l())Fe=!1;else try{localStorage.setItem("_pouch_check_localstorage",1),Fe=!!localStorage.getItem("_pouch_check_localstorage")}catch(e){Fe=!1}Le(h,Se.EventEmitter),h.prototype.addListener=function(e,t,n,r){function i(){if(o._listeners[t])if(u)u="waiting";else{u=!0;var e=function(e,t){for(var n={},r=0,i=t.length;r<i;r++){var o=t[r];o in e&&(n[o]=e[o])}return n}(r,["style","include_docs","attachments","conflicts","filter","doc_ids","view","since","query_params","binary"]);n.changes(e).on("change",function(e){e.seq>r.since&&!r.cancelled&&(r.since=e.seq,r.onChange(e))}).on("complete",function(){"waiting"===u&&Be(i),u=!1}).on("error",function(){u=!1})}}if(!this._listeners[t]){var o=this,u=!1;this._listeners[t]=i,this.on(e,i)}},h.prototype.removeListener=function(e,t){t in this._listeners&&(Se.EventEmitter.prototype.removeListener.call(this,e,this._listeners[t]),delete this._listeners[t])},h.prototype.notifyLocalWindows=function(e){l()?chrome.storage.local.set({dbName:e}):d()&&(localStorage[e]="a"===localStorage[e]?"b":"a")},h.prototype.notify=function(e){this.emit(e),this.notifyLocalWindows(e)};var Ne="function"==typeof Object.assign?Object.assign:function(e){for(var t=Object(e),n=1;n<arguments.length;n++){var r=arguments[n];if(null!=r)for(var i in r)Object.prototype.hasOwnProperty.call(r,i)&&(t[i]=r[i])}return t};Le(v,Error),v.prototype.toString=function(){return JSON.stringify({status:this.status,name:this.name,message:this.message,reason:this.reason})};new v(401,"unauthorized","Name 
or password is incorrect."),new v(400,"bad_request","Missing JSON list of 'docs'"),new v(404,"not_found","missing"),new v(409,"conflict","Document update conflict"),new v(400,"bad_request","_id field must contain a string"),new v(412,"missing_id","_id is required for puts"),new v(400,"bad_request","Only reserved document ids may start with underscore."),new v(412,"precondition_failed","Database not open"),new v(500,"unknown_error","Database encountered an unknown error"),new v(500,"badarg","Some query argument is invalid"),new v(400,"invalid_request","Request was invalid"),new v(400,"query_parse_error","Some query parameter is invalid"),new v(500,"doc_validation","Bad special document member"),new v(400,"bad_request","Something wrong with the request"),new v(400,"bad_request","Document must be a JSON object"),new v(404,"not_found","Database not found"),new v(500,"indexed_db_went_bad","unknown"),new v(500,"web_sql_went_bad","unknown"),new v(500,"levelDB_went_went_bad","unknown"),new v(403,"forbidden","Forbidden by design doc validate_doc_update function"),new v(400,"bad_request","Invalid rev format"),new v(412,"file_exists","The database could not be created, the file already exists."),new v(412,"missing_stub","A pre-existing attachment stub wasn't found"),new v(413,"invalid_url","Provided URL is invalid"),Oe.v4;var Pe=["$or","$nor","$not"],Re=-324,ze=3,Ve="",Je={$elemMatch:function(e,t,n,r){return!!Array.isArray(r)&&(0!==r.length&&("object"==typeof r[0]?r.some(function(e){return P(e,t,Object.keys(t))}):r.some(function(r){return R(t,e,n,r)})))},$allMatch:function(e,t,n,r){return!!Array.isArray(r)&&(0!==r.length&&("object"==typeof r[0]?r.every(function(e){return P(e,t,Object.keys(t))}):r.every(function(r){return R(t,e,n,r)})))},$eq:function(e,t,n,r){return V(r)&&0===T(r,t)},$gte:function(e,t,n,r){return V(r)&&T(r,t)>=0},$gt:function(e,t,n,r){return V(r)&&T(r,t)>0},$lte:function(e,t,n,r){return V(r)&&T(r,t)<=0},$lt:function(e,t,n,r){return 
V(r)&&T(r,t)<0},$exists:function(e,t,n,r){return t?V(r):!V(r)},$mod:function(e,t,n,r){return z(r)&&function(e,t){var n=t[0],r=t[1];if(0===n)throw new Error("Bad divisor, cannot divide by zero");if(parseInt(n,10)!==n)throw new Error("Divisor is not an integer");if(parseInt(r,10)!==r)throw new Error("Modulus is not an integer");return parseInt(e,10)===e&&e%n===r}(r,t)},$ne:function(e,t,n,r){return t.every(function(e){return 0!==T(r,e)})},$in:function(e,t,n,r){return z(r)&&J(r,t)},$nin:function(e,t,n,r){return z(r)&&!J(r,t)},$size:function(e,t,n,r){return z(r)&&function(e,t){return e.length===t}(r,t)},$all:function(e,t,n,r){return Array.isArray(r)&&function(e,t){return t.every(function(t){return e.indexOf(t)>-1})}(r,t)},$regex:function(e,t,n,r){return z(r)&&function(e,t){return new RegExp(t).test(e)}(r,t)},$type:function(e,t,n,r){return function(e,t){switch(t){case"null":return null===e;case"boolean":return"boolean"==typeof e;case"number":return"number"==typeof e;case"string":return"string"==typeof e;case"array":return e instanceof Array;case"object":return"[object Object]"==={}.toString.call(e)}throw new Error(t+" not supported as a type.Please use one of object, string, array, number, boolean or null.")}(r,t)}},Ke=K(function(e){for(var t=[],n=0,r=e.length;n<r;n++){var i=e[n];Array.isArray(i)?t=t.concat(Ke.apply(null,i)):t.push(i)}return t}),We=function(e){return atob(e)};Q.prototype.add=function(e){return this.promise=this.promise.catch(function(){}).then(function(){return e()}),this.promise},Q.prototype.finish=function(){return this.promise};n.setImmediate||n.setTimeout;Le(ne,Error),Le(re,Error),Le(ie,Error);var Xe={},Ge=new Q,Ye=50,He=function(e,t,n,r){function i(e,t,n){try{t(n)}catch(t){fe(e,t)}}function o(e,t,n,r,i){try{return{output:t(n,r,i)}}catch(t){return fe(e,t),{error:t}}}function u(e,t){var n=T(e.key,t.key);return 0!==n?n:T(e.value,t.value)}function s(e){var t=e.value;return t&&"object"==typeof t&&t._id||e.id}function a(e){e.rows.forEach(function(e){var 
t=e.doc&&e.doc._attachments;t&&Object.keys(t).forEach(function(e){var n=t[e];t[e].data=function(e,t){return H(We(e),t)}(n.data,n.content_type)})})}function c(e){return function(t){return e.include_docs&&e.attachments&&e.binary&&a(t),t}}function f(e,t,n,r){var i=t[e];void 0!==i&&(r&&(i=encodeURIComponent(JSON.stringify(i))),n.push(e+"="+i))}function l(e){if(void 0!==e){var t=Number(e);return isNaN(t)||t!==parseInt(e,10)?e:t}}function d(e,t){var n=e.descending?"endkey":"startkey",r=e.descending?"startkey":"endkey";if(void 0!==e[n]&&void 0!==e[r]&&T(e[n],e[r])>0)throw new ne("No rows can match your key range, reverse your start_key and end_key or set {descending : true}");if(t.reduce&&!1!==e.reduce){if(e.include_docs)throw new ne("{include_docs:true} is invalid for reduce");if(e.keys&&e.keys.length>1&&!e.group&&!e.group_level)throw new ne("Multi-key fetches for reduce views must use {group: true}")}["group_level","limit","skip"].forEach(function(t){var n=function(e){if(e){if("number"!=typeof e)return new ne('Invalid value for integer: "'+e+'"');if(e<0)return new ne('Invalid value for positive integer: "'+e+'"')}}(e[t]);if(n)throw n})}function h(e){return function(t){if(404===t.status)return e;throw t}}function p(e,t,n){var r="_local/doc_"+e,i={_id:r,keys:[]},o=n.get(e),u=o[0];return(function(e){return 1===e.length&&/^1-/.test(e[0].rev)}(o[1])?Me.resolve(i):t.db.get(r).catch(h(i))).then(function(e){return function(e){return e.keys.length?t.db.allDocs({keys:e.keys,include_docs:!0}):Me.resolve({rows:[]})}(e).then(function(t){return function(e,t){for(var n=[],r=new Ie,i=0,o=t.rows.length;i<o;i++){var s=t.rows[i].doc;if(s&&(n.push(s),r.add(s._id),s._deleted=!u.has(s._id),!s._deleted)){var a=u.get(s._id);"value"in a&&(s.value=a.value)}}var c=ae(u);return c.forEach(function(e){if(!r.has(e)){var t={_id:e},i=u.get(e);"value"in i&&(t.value=i.value),n.push(t)}}),e.keys=se(c.concat(e.keys)),n.push(e),n}(e,t)})})}function v(e){var t="string"==typeof e?e:e.name,n=Xe[t];return 
n||(n=Xe[t]=new Q),n}function m(e){return ue(v(e),function(){return function(e){function n(t,n){return function(){return function(e,t,n){return e.db.get("_local/lastSeq").catch(h({_id:"_local/lastSeq",seq:0})).then(function(r){var i=ae(t);return Me.all(i.map(function(n){return p(n,e,t)})).then(function(t){var i=y(t);return r.seq=n,i.push(r),e.db.bulkDocs({docs:i})})})}(e,t,n)}}function r(){return e.sourceDB.changes({conflicts:!0,include_docs:!0,style:"all_docs",since:f,limit:Ye}).then(o)}function o(t){var o=t.results;if(o.length){var d=function(t){for(var n=new Ue,r=0,o=t.length;r<o;r++){var l=t[r];if("_"!==l.doc._id[0]){s=[],(a=l.doc)._deleted||i(e.sourceDB,c,a),s.sort(u);var d=function(e){for(var t,n=new Ue,r=0,i=e.length;r<i;r++){var o=e[r],u=[o.key,o.id];r>0&&0===T(o.key,t)&&u.push(r),n.set(D(u),o),t=o.key}return n}(s);n.set(l.doc._id,[d,l.changes])}f=l.seq}return n}(o);if(l.add(n(d,f)),!(o.length<Ye))return r()}}var s,a,c=t(e.mapFun,function(e,t){var n={id:a._id,key:M(e)};void 0!==t&&null!==t&&(n.value=M(t)),s.push(n)}),f=e.seq||0,l=new Q;return r().then(function(){return l.finish()}).then(function(){e.seq=f})}(e)})()}function _(e,t){return ue(v(e),function(){return function(e,t){function r(t){return t.include_docs=!0,e.db.allDocs(t).then(function(e){return u=e.total_rows,e.rows.map(function(e){if("value"in e.doc&&"object"==typeof e.doc.value&&null!==e.doc.value){var t=Object.keys(e.doc.value).sort(),n=["id","key","value"];if(!(t<n||t>n))return e.doc.value}var r=function(e){for(var t=[],n=[],r=0;;){var i=e[r++];if("\0"!==i)switch(i){case"1":t.push(null);break;case"2":t.push("1"===e[r]),r++;break;case"3":var o=I(e,r);t.push(o.num),r+=o.length;break;case"4":for(var u="";;){var s=e[r];if("\0"===s)break;u+=s,r++}u=u.replace(/\u0001\u0001/g,"\0").replace(/\u0001\u0002/g,"").replace(/\u0002\u0002/g,""),t.push(u);break;case"5":var a={element:[],index:t.length};t.push(a.element),n.push(a);break;case"6":var 
c={element:{},index:t.length};t.push(c.element),n.push(c);break;default:throw new Error("bad collationIndex or unexpectedly reached end of input: "+i)}else{if(1===t.length)return t.pop();U(t,n)}}}(e.doc._id);return{key:r[0],id:r[1],value:"value"in e.doc?e.doc.value:null}})})}function i(r){var i;if(i=a?function(e,t,r){0===r.group_level&&delete r.group_level;var i=r.group||r.group_level,u=n(e.reduceFun),s=[],a=isNaN(r.group_level)?Number.POSITIVE_INFINITY:r.group_level;t.forEach(function(e){var t=s[s.length-1],n=i?e.key:null;if(i&&Array.isArray(n)&&(n=n.slice(0,a)),t&&0===T(t.groupKey,n))return t.keys.push([e.key,e.id]),void t.values.push(e.value);s.push({keys:[[e.key,e.id]],values:[e.value],groupKey:n})}),t=[];for(var c=0,f=s.length;c<f;c++){var l=s[c],d=o(e.sourceDB,u,l.keys,l.values,!1);if(d.error&&d.error instanceof ie)throw d.error;t.push({value:d.error?null:d.output,key:l.groupKey})}return{rows:function(e,t,n){return n=n||0,"number"==typeof t?e.slice(n,t+n):n>0?e.slice(n):e}(t,r.limit,r.skip)}}(e,r,t):{total_rows:u,offset:c,rows:r},t.update_seq&&(i.update_seq=e.seq),t.include_docs){var f=se(r.map(s));return e.sourceDB.allDocs({keys:f,include_docs:!0,conflicts:t.conflicts,attachments:t.attachments,binary:t.binary}).then(function(e){var t=new Ue;return e.rows.forEach(function(e){t.set(e.id,e.doc)}),r.forEach(function(e){var n=s(e),r=t.get(n);r&&(e.doc=r)}),i})}return i}var u,a=e.reduceFun&&!1!==t.reduce,c=t.skip||0;if(void 0===t.keys||t.keys.length||(t.limit=0,delete t.keys),void 0!==t.keys){var f=t.keys,l=f.map(function(e){var n={startkey:D([e]),endkey:D([e,{}])};return t.update_seq&&(n.update_seq=!0),r(n)});return Me.all(l).then(y).then(i)}var d={descending:t.descending};t.update_seq&&(d.update_seq=!0);var h,p;if("start_key"in t&&(h=t.start_key),"startkey"in t&&(h=t.startkey),"end_key"in t&&(p=t.end_key),"endkey"in t&&(p=t.endkey),void 0!==h&&(d.startkey=t.descending?D([h,{}]):D([h])),void 0!==p){var 
v=!1!==t.inclusive_end;t.descending&&(v=!v),d.endkey=D(v?[p,{}]:[p])}if(void 0!==t.key){var g=D([t.key]),m=D([t.key,{}]);d.descending?(d.endkey=g,d.startkey=m):(d.startkey=g,d.endkey=m)}return a||("number"==typeof t.limit&&(d.limit=t.limit),d.skip=c),r(d).then(i)}(e,t)})()}function w(t,n,i){if("function"==typeof t._query)return function(e,t,n){return new Me(function(r,i){e._query(t,n,function(e,t){if(e)return i(e);r(t)})})}(t,n,i);if(g(t))return function(e,t,n){var r,i=[],o="GET";if(f("reduce",n,i),f("include_docs",n,i),f("attachments",n,i),f("limit",n,i),f("descending",n,i),f("group",n,i),f("group_level",n,i),f("skip",n,i),f("stale",n,i),f("conflicts",n,i),f("startkey",n,i,!0),f("start_key",n,i,!0),f("endkey",n,i,!0),f("end_key",n,i,!0),f("inclusive_end",n,i),f("key",n,i,!0),f("update_seq",n,i),i=i.join("&"),i=""===i?"":"?"+i,void 0!==n.keys){var u="keys="+encodeURIComponent(JSON.stringify(n.keys));u.length+i.length+1<=2e3?i+=("?"===i[0]?"&":"?")+u:(o="POST","string"==typeof t?r={keys:n.keys}:t.keys=n.keys)}if("string"==typeof t){var s=ce(t);return e.request({method:o,url:"_design/"+s[0]+"/_view/"+s[1]+i,body:r}).then(function(e){return e.rows.forEach(function(e){if(e.value&&e.value.error&&"builtin_reduce_error"===e.value.error)throw new Error(e.reason)}),e}).then(c(n))}return r=r||{},Object.keys(t).forEach(function(e){Array.isArray(t[e])?r[e]=t[e]:r[e]=t[e].toString()}),e.request({method:"POST",url:"_temp_view"+i,body:r}).then(c(n))}(t,n,i);if("string"!=typeof n)return d(i,n),Ge.add(function(){return te(t,"temp_view/temp_view",n.map,n.reduce,!0,e).then(function(e){return function(e,t){return e.then(function(e){return t().then(function(){return e})},function(e){return t().then(function(){throw e})})}(m(e).then(function(){return _(e,i)}),function(){return e.db.destroy()})})}),Ge.finish();var o=n,u=ce(o),s=u[0],a=u[1];return t.get("_design/"+s).then(function(n){var u=n.views&&n.views[a];if(!u)throw new re("ddoc "+n._id+" has no view named "+a);return 
r(n,a),d(i,u),te(t,o,u.map,u.reduce,!1,e).then(function(e){return"ok"===i.stale||"update_after"===i.stale?("update_after"===i.stale&&Be(function(){m(e)}),_(e,i)):m(e).then(function(){return _(e,i)})})})}return{query:function(e,t,n){var r=this;"function"==typeof t&&(n=t,t={}),t=t?function(e){return e.group_level=l(e.group_level),e.limit=l(e.limit),e.skip=l(e.skip),e}(t):{},"function"==typeof e&&(e={map:e});var i=Me.resolve().then(function(){return w(r,e,t)});return oe(i,n),i},viewCleanup:function(e){return qe(function(t){var n=t.pop(),r=e.apply(this,t);return"function"==typeof n&&oe(r,n),r})}(function(){return"function"==typeof this._viewCleanup?function(e){return new Me(function(t,n){e._viewCleanup(function(e,r){if(e)return n(e);t(r)})})}(this):g(this)?function(e){return e.request({method:"POST",url:"_view_cleanup"})}(this):function(t){return t.get("_local/"+e).then(function(e){var n=new Ue;Object.keys(e.views).forEach(function(e){var t=ce(e),r="_design/"+t[0],i=t[1],o=n.get(r);o||(o=new Ie,n.set(r,o)),o.add(i)});var r={keys:ae(n),include_docs:!0};return t.allDocs(r).then(function(r){var i={};r.rows.forEach(function(t){var r=t.key.substring(8);n.get(t.key).forEach(function(n){var o=r+"/"+n;e.views[o]||(o=n);var u=Object.keys(e.views[o]),s=t.doc&&t.doc.views&&t.doc.views[n];u.forEach(function(e){i[e]=i[e]||s})})});var o=Object.keys(i).filter(function(e){return!i[e]}).map(function(e){return ue(v(e),function(){return new t.constructor(e,t.__opts).destroy()})()});return Me.all(o).then(function(){return{ok:!0}})})},h({ok:!0}))}(this)})}}("indexes",function(e,t){return le(Object.keys(e.fields),t)},function(){throw new Error("reduce not supported")},function(e,t){var n=e.views[t];if(!n.map||!n.map.fields)throw new Error("ddoc "+e._id+" with view "+t+" doesn't have map.fields defined. 
maybe it wasn't created by this plugin?")}),Qe=null,Ze={"":{}},et=["$eq","$gt","$gte","$lt","$lte"],tt=W(function(e,t){function n(){return i||(i=Z(JSON.stringify(t)))}var r=o((t=_(t)).index);t.index=de(t.index),function(e){var t=e.fields.filter(function(e){return"asc"===L(e)});if(0!==t.length&&t.length!==e.fields.length)throw new Error("unsupported mixed sorting")}(t.index);var i,u=t.name||"idx-"+n(),s=t.ddoc||"idx-"+n(),a="_design/"+s,c=!1,f=!1;return e.constructor.emit("debug",["find","creating index",a]),m(e,a,function(e){return e._rev&&"query"!==e.language&&(c=!0),e.language="query",e.views=e.views||{},!(f=!!e.views[u])&&(e.views[u]={map:{fields:X(t.index.fields)},reduce:"_count",options:{def:r}},e)}).then(function(){if(c)throw new Error('invalid language for ddoc with id "'+a+'" (should be "query")')}).then(function(){var t=s+"/"+u;return He.query.call(e,t,{limit:0,reduce:!1}).then(function(){return{id:a,name:u,result:f?"exists":"created"}})})}),nt=W(Ee),rt=W(function(e,t){return Ee(e,t,!0).then(function(n){return{dbname:e.name,index:n.index,selector:t.selector,range:{start_key:n.queryOpts.startkey,end_key:n.queryOpts.endkey},opts:{use_index:t.use_index||[],bookmark:"nil",limit:t.limit,skip:t.skip,sort:t.sort||{},fields:t.fields,conflicts:!1,r:[49]},limit:t.limit,skip:t.skip||0,fields:t.fields}})}),it=W(pe),ot=W(function(e,t){if(!t.ddoc)throw new Error("you must supply an index.ddoc when deleting");if(!t.name)throw new Error("you must supply an index.name when deleting");var n=t.ddoc,r=t.name;return m(e,n,function(e){return 1===Object.keys(e.views).length&&e.views[r]?{_id:n,_deleted:!0}:(delete e.views[r],e)}).then(function(){return He.viewCleanup.apply(e)}).then(function(){return{ok:!0}})}),ut={};ut.createIndex=u(function(e,t){if("object"!=typeof e)return t(new Error("you must provide an index to create"));(g(this)?w:tt)(this,e,t)}),ut.find=u(function(e,t){if(void 0===t&&(t=e,e=void 0),"object"!=typeof e)return t(new Error("you must provide search parameters 
to find()"));(g(this)?b:nt)(this,e,t)}),ut.explain=u(function(e,t){if(void 0===t&&(t=e,e=void 0),"object"!=typeof e)return t(new Error("you must provide search parameters to explain()"));(g(this)?k:rt)(this,e,t)}),ut.getIndexes=u(function(e){(g(this)?$:it)(this,e)}),ut.deleteIndex=u(function(e,t){if("object"!=typeof e)return t(new Error("you must provide an index to delete"));(g(this)?x:ot)(this,e,t)}),"undefined"==typeof PouchDB?p("error",'pouchdb-find plugin error: Cannot find global "PouchDB" object! Did you remember to include pouchdb.js?'):PouchDB.plugin(ut)}).call(this,e(6),"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{1:1,2:2,3:3,4:4,5:5,6:6,7:7,8:8}]},{},[13]); | PypiClean |
/MnemoPwd-1.2.1-py3-none-any.whl/mnemopwd/server/clients/protocol/StateS2.py |
# Copyright (c) 2015-2016, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
State S2 : Login or CountCreation
"""
from ...util.funcutils import singleton
@singleton
class StateS2:
    """State S2: route the client to the Login substate (S21) or the
    CountCreation substate (S22) based on the received command."""

    def do(self, client, data):
        """Action of state S2: select substate S21 or S22.

        The protocol command sits at a fixed offset in `data`:
        bytes 170:175 equal b"LOGIN" for a login request (S21) and
        bytes 170:178 equal b"CREATION" for a count-creation request
        (S22).  An unrecognised command is reported to the client's
        exception handler as a protocol error.
        """
        wants_login = data[170:175] == b"LOGIN"        # S21 request?
        wants_creation = data[170:178] == b"CREATION"  # S22 request?

        # The two commands are mutually exclusive, so a plain chain is safe.
        if wants_login:
            client.state = client.states['21']
        elif wants_creation:
            client.state = client.states['22']

        if wants_login or wants_creation:
            # Hand the same payload to the freshly selected substate,
            # executed off the event loop on the default executor.
            client.loop.run_in_executor(None, client.state.do, client, data)
        else:
            # Unknown command: notify the client exception handler
            # from the event-loop thread.
            client.loop.call_soon_threadsafe(
                client.exception_handler, Exception('S2 protocol error'))
/HBT_IP_Test-1.0.1-py3-none-any.whl/HBT_IP_Test/libs/isom/python/IsomDeviceCollections_e_pb2.py |
# Generated by the protocol buffer compiler (protoc) -- module preamble.
# NOTE(review): generated file; edits here are normally overwritten by protoc.
import sys
# _b: identity on Python 2, latin-1 encoder on Python 3, so the embedded
# serialized descriptor string literals below become bytes on both versions.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database; generated descriptors and message classes
# in this module are registered against it.
_sym_db = _symbol_database.Default()


# Descriptor modules for the .proto files this file depends on.
import IsomStdDef_pb2 as IsomStdDef__pb2
import IsomDevices_pb2 as IsomDevices__pb2
import IsomDeviceCollections_pb2 as IsomDeviceCollections__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='IsomDeviceCollections_e.proto',
package='Honeywell.Security.ISOM.DeviceCollections',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x1dIsomDeviceCollections_e.proto\x12)Honeywell.Security.ISOM.DeviceCollections\x1a\x10IsomStdDef.proto\x1a\x11IsomDevices.proto\x1a\x1bIsomDeviceCollections.proto\"\xbd\x01\n\x18\x44\x65viceCollectionConfig_e2\xa0\x01\n\x1e\x44\x65viceCollectionAssignedDevice\x12\x41.Honeywell.Security.ISOM.DeviceCollections.DeviceCollectionConfig\x18\xa1\xf7\x36 \x03(\x0b\x32-.Honeywell.Security.ISOM.Devices.DeviceConfigB\x04\xa0\xb5\x18\x01')
,
dependencies=[IsomStdDef__pb2.DESCRIPTOR,IsomDevices__pb2.DESCRIPTOR,IsomDeviceCollections__pb2.DESCRIPTOR,])
_DEVICECOLLECTIONCONFIG_E = _descriptor.Descriptor(
name='DeviceCollectionConfig_e',
full_name='Honeywell.Security.ISOM.DeviceCollections.DeviceCollectionConfig_e',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
_descriptor.FieldDescriptor(
name='DeviceCollectionAssignedDevice', full_name='Honeywell.Security.ISOM.DeviceCollections.DeviceCollectionConfig_e.DeviceCollectionAssignedDevice', index=0,
number=900001, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=_b('\240\265\030\001'), file=DESCRIPTOR),
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=332,
)
DESCRIPTOR.message_types_by_name['DeviceCollectionConfig_e'] = _DEVICECOLLECTIONCONFIG_E
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeviceCollectionConfig_e = _reflection.GeneratedProtocolMessageType('DeviceCollectionConfig_e', (_message.Message,), {
'DESCRIPTOR' : _DEVICECOLLECTIONCONFIG_E,
'__module__' : 'IsomDeviceCollections_e_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.DeviceCollections.DeviceCollectionConfig_e)
})
_sym_db.RegisterMessage(DeviceCollectionConfig_e)
_DEVICECOLLECTIONCONFIG_E.extensions_by_name['DeviceCollectionAssignedDevice'].message_type = IsomDevices__pb2._DEVICECONFIG
IsomDeviceCollections__pb2.DeviceCollectionConfig.RegisterExtension(_DEVICECOLLECTIONCONFIG_E.extensions_by_name['DeviceCollectionAssignedDevice'])
_DEVICECOLLECTIONCONFIG_E.extensions_by_name['DeviceCollectionAssignedDevice']._options = None
# @@protoc_insertion_point(module_scope) | PypiClean |
/Eventory-0.0.6.tar.gz/Eventory-0.0.6/eventory/ext/inktory.py | import clr
import os
import subprocess
import sys
from os import path
from tempfile import TemporaryDirectory
from System.IO import FileNotFoundException
from eventory import EventoryParser, Eventructor, register_parser
try:
    # The Ink runtime ships as a .NET assembly loaded through pythonnet (clr).
    clr.AddReference("ink-engine-runtime")
except FileNotFoundException:
    # Translate the .NET exception into the native Python equivalent and
    # drop the foreign traceback context.
    raise FileNotFoundError(f"Couldn't find \"ink-engine-runtime.dll\", please add it to the CWD ({os.getcwd()}) in order to use inktory. You can "
                            "download it from here: https://github.com/inkle/ink/releases") from None
else:
    # Only importable once the assembly reference has been registered.
    from Ink.Runtime import Story
# Command line used to invoke the Ink compiler. inklecate is a .NET
# executable, so on every platform except Windows it must be run through
# Mono (the original check only covered "linux", which silently broke
# macOS and the BSDs).
if sys.platform == "win32":
    INKLECATE_CMD = ["inklecate.exe"]
else:
    INKLECATE_CMD = ["mono", "inklecate.exe"]
class InkEventructor(Eventructor):
    """Eventructor that drives an inkle Ink story via the Ink runtime."""

    def init(self):
        # Build the runtime story object from the compiled JSON content.
        self.story = Story(self.content)

    async def index_input(self, max_index: int) -> int:
        """Prompt until the narrator supplies a number in [1, max_index];
        return the corresponding zero-based choice index."""
        while True:
            answer = (await self.narrator.input()).strip()
            if answer.isnumeric():
                chosen = int(answer)
                if 0 < chosen <= max_index:
                    return chosen - 1
            await self.narrator.output(f"Please use a number between 1 and {max_index}\n")

    async def play(self):
        """Run the story: stream text until a choice point, ask, repeat."""
        await self.prepare()
        story = self.story
        while True:
            # Emit every line the story can produce before the next choice.
            while story.canContinue:
                await self.narrator.output(story.Continue())
            choice_count = story.currentChoices.Count
            if choice_count == 0:
                # No continuation and no choices: the story is over.
                break
            menu = "\n".join(
                f"{i}. {choice.text}"
                for i, choice in enumerate(story.currentChoices, 1)
            ) + "\n"
            await self.narrator.output(menu)
            story.ChooseChoiceIndex(await self.index_input(choice_count))
class EventoryInkParser(EventoryParser):
    """EventoryParser that compiles raw Ink source through inklecate."""

    instructor = InkEventructor

    @staticmethod
    def parse_content(content) -> Story:
        # Compile the Ink source into the JSON the runtime engine consumes.
        return EventoryInkParser.compile(content)

    @staticmethod
    def compile(ink: str) -> str:
        """Compile Ink source text to its JSON form using the external
        inklecate compiler; raises FileNotFoundError if it is missing."""
        with TemporaryDirectory() as workdir:
            source_file = path.join(workdir, "input.ink")
            # inklecate writes its output next to the input file.
            compiled_file = source_file + ".json"
            with open(source_file, "w+") as handle:
                handle.write(ink)
            try:
                subprocess.run([*INKLECATE_CMD, source_file], check=True)
            except FileNotFoundError:
                raise FileNotFoundError(
                    f"Couldn't find \"inklecate.exe\", please add it to your PATH or to the CWD ({os.getcwd()}) in order to use inktory. You can "
                    "download it from here: https://github.com/inkle/ink/releases") from None
            # inklecate emits a UTF-8 BOM; utf-8-sig strips it.
            with open(compiled_file, "r", encoding="utf-8-sig") as handle:
                return handle.read()
# Make this parser discoverable by eventory under the "Ink" alias.
register_parser(EventoryInkParser, ("Ink",))
/Gbtestapi0.1-0.1a10-py3-none-any.whl/gailbot/core/utils/download.py |
import shutil
from typing import List
from tqdm.auto import tqdm
import requests
from zipfile import ZipFile
import os
import socket
from .logger import makelogger
logger = makelogger("download")
def download_from_urls(
    urls: List[str], download_dir: str, unzip: bool = True, chunkSize: int = 8192
) -> List[str]:
    """
    Download each url into ``download_dir`` and return the paths of the
    directories containing the extracted data (one per url).

    Args:
        urls: URLs of zip archives to fetch.
        download_dir: directory under which downloads and extractions live.
        unzip: if True, extract each downloaded archive.
        chunkSize: streaming chunk size in bytes.

    Returns:
        Extraction directory paths; empty when ``unzip`` is False.
    """
    # Scratch area for the raw zip files; recreated fresh on every call.
    dataset_download_path = os.path.join(download_dir, "download")
    dataset_extract_path = download_dir
    if os.path.isdir(dataset_download_path):
        shutil.rmtree(dataset_download_path)
    os.makedirs(dataset_download_path)
    os.makedirs(dataset_extract_path, exist_ok=True)
    # Download each url as a zip file.
    extracted_paths = []
    for url in urls:
        # Name the temporary zip after the final path component of the url.
        name = os.path.splitext(os.path.basename(url))[0]
        url_temp_path = "{}.zip".format(os.path.join(dataset_download_path, name))
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            # Context manager closes the progress bar deterministically
            # (the original never closed it, leaking terminal state).
            with tqdm(
                total=int(r.headers.get("content-length", 0)), desc="{}".format(name)
            ) as pbar:
                with open(url_temp_path, "wb+") as f:
                    for chunk in r.iter_content(chunk_size=chunkSize):
                        if chunk:  # filter out keep-alive new chunks
                            f.write(chunk)
                            pbar.update(len(chunk))
        if unzip:
            with ZipFile(url_temp_path, "r") as zipObj:
                # Extract into a per-url directory, replacing any stale copy.
                extract_path = os.path.join(dataset_extract_path, name)
                extracted_paths.append(extract_path)
                if os.path.exists(extract_path):
                    shutil.rmtree(extract_path)
                os.makedirs(extract_path)
                zipObj.extractall(extract_path)
    # Drop the raw downloads once everything has been extracted.
    shutil.rmtree(dataset_download_path)
    return extracted_paths
def is_internet_connected() -> bool:
    """
    Return True if connected to the internet, False otherwise.

    Connectivity is probed by opening a TCP connection to a well-known
    host; any OSError (DNS failure, refused/blocked connection) means we
    are considered offline.
    """
    try:
        sock = socket.create_connection(("www.google.com", 80))
    except OSError:
        return False
    # Bug fix: the original evaluated ``sock.close`` without calling it,
    # leaking the socket (and printed a stray debug message).
    sock.close()
    return True
/FlexGet-3.9.6-py3-none-any.whl/flexget/plugins/filter/age.py | from datetime import datetime
from dateutil.parser import parse as dateutil_parse
from loguru import logger
from flexget import plugin
from flexget.event import event
from flexget.utils.tools import parse_timedelta
logger = logger.bind(name='age')
class Age:
    """
    Rejects/accepts entries based on date in specified entry field

    Example:

      age:
        field: 'accessed' # 'accessed' is a field set from filesystem plugin
        age: '7 days'
        action: 'accept'
    """

    schema = {
        'type': 'object',
        'properties': {
            'field': {'type': 'string'},
            'action': {'type': 'string', 'enum': ['accept', 'reject']},
            'age': {'type': 'string', 'format': 'interval'},
        },
        'required': ['field', 'action', 'age'],
        'additionalProperties': False,
    }

    def on_task_filter(self, task, config):
        """Accept or reject each entry whose ``field`` date is older than
        the configured ``age`` interval.

        The field value may be a datetime, a POSIX timestamp (int or
        float), or a parseable date string; anything else is ignored with
        a warning.
        """
        for entry in task.entries:
            field = config['field']
            if field not in entry:
                entry.fail(f'Field {field} does not exist')
                continue
            field_value = entry[field]
            if isinstance(field_value, datetime):
                field_date = field_value
            elif isinstance(field_value, (int, float)) and not isinstance(field_value, bool):
                # Generalized: integer epoch timestamps were previously
                # rejected even though fromtimestamp accepts them.
                field_date = datetime.fromtimestamp(field_value)
            elif isinstance(field_value, str):
                try:
                    field_date = dateutil_parse(field_value)
                except ValueError:
                    logger.warning(
                        'Entry {} ignored: {} is not a valid date', entry['title'], field_value
                    )
                    continue
            else:
                logger.warning(
                    'Entry {} ignored: {} is not a valid date', entry['title'], field_value
                )
                continue
            # Entries dated before this cutoff trigger the configured action.
            age_cutoff = datetime.now() - parse_timedelta(config['age'])
            if field_date < age_cutoff:
                info_string = 'Date in field `{}` is older than {}'.format(field, config['age'])
                if config['action'] == 'accept':
                    entry.accept(info_string)
                else:
                    entry.reject(info_string)
                logger.debug(
                    'Entry {} was {}ed because date in field `{}` is older than {}',
                    entry['title'],
                    config['action'],
                    field,
                    config['age'],
                )
@event('plugin.register')
def register_plugin():
    # Expose this filter to FlexGet under the plugin name "age".
    plugin.register(Age, 'age', api_ver=2)
/DendroPy-4.6.1.tar.gz/DendroPy-4.6.1/src/dendropy/dataio/nexusreader.py |
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Implementation of NEXUS-schema data reader.
"""
import re
import collections
from dendropy.utility import error
from dendropy.utility import textprocessing
from dendropy.dataio import ioservice
from dendropy.dataio import nexusprocessing
from dendropy.dataio import newickreader
###############################################################################
## NexusReader
class NexusReader(ioservice.DataReader):
"Encapsulates loading and parsing of a NEXUS schema file."
class BlockTerminatedException(Exception):
pass
class NexusReaderError(error.DataParseError):
def __init__(self, message,
line_num=None,
col_num=None,
stream=None):
error.DataParseError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class NotNexusFileError(NexusReaderError):
def __init__(self, message,
line_num=None,
col_num=None,
stream=None):
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class LinkRequiredError(NexusReaderError):
def __init__(self, message,
line_num=None,
col_num=None,
stream=None):
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class NoCharacterBlocksFoundError(NexusReaderError):
def __init__(self, message,
line_num=None,
col_num=None,
stream=None):
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class UndefinedBlockError(NexusReaderError):
def __init__(self, message,
line_num=None,
col_num=None,
stream=None):
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class MultipleBlockWithSameTitleError(NexusReaderError):
def __init__(self, message,
line_num=None,
col_num=None,
stream=None):
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class InvalidCharacterStateSymbolError(NexusReaderError):
def __init__(self, message,
line_num=None,
col_num=None,
stream=None):
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class InvalidContinuousCharacterValueError(NexusReaderError):
def __init__(self, message,
line_num=None,
col_num=None,
stream=None):
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class TooManyTaxaError(NexusReaderError):
def __init__(self,
taxon_namespace,
max_taxa,
label,
line_num=None,
col_num=None,
stream=None):
message = "Cannot add taxon with label '{}': Declared number of taxa ({}) already defined: {}".format(
label,
max_taxa,
str(["{}".format(t.label) for t in taxon_namespace]))
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class UndefinedTaxonError(NexusReaderError):
def __init__(self,
taxon_namespace,
label,
line_num=None,
col_num=None,
stream=None):
message = "Taxon '{}' is not in the set of defined taxa: {}".format(
label,
str(["{}".format(t.label) for t in taxon_namespace]))
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class TooManyCharactersError(NexusReaderError):
def __init__(self,
max_characters,
character,
line_num=None,
col_num=None,
stream=None):
message = "Cannot add '{}' to sequence: declared sequence length ({}) will be exceeded".format(
character, max_characters)
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
class IncompleteBlockError(NexusReaderError):
def __init__(self, message,
line_num=None,
col_num=None,
stream=None):
NexusReader.NexusReaderError.__init__(self,
message=message,
line_num=line_num,
col_num=col_num,
stream=stream)
###########################################################################
## Life-cycle and Setup
    def __init__(self, **kwargs):
        """
        Keyword Arguments
        -----------------
        rooting : string, {['default-unrooted'], 'default-rooted', 'force-unrooted', 'force-rooted'}
            Specifies how trees in the data source should be intepreted with
            respect to their rooting:

                'default-unrooted' [default]:
                    All trees are interpreted as unrooted unless a '[&R]'
                    comment token explicitly specifies them as rooted.
                'default-rooted'
                    All trees are interpreted as rooted unless a '[&U]'
                    comment token explicitly specifies them as unrooted.
                'force-unrooted'
                    All trees are unconditionally interpreted as unrooted.
                'force-rooted'
                    All trees are unconditionally interpreted as rooted.

        edge_length_type : type, default: ``float``
            Specifies the type of the edge lengths (``int`` or ``float``). Tokens
            interpreted as branch lengths will be cast to this type.
            Defaults to ``float``.
        suppress_edge_lengths : boolean, default: |False|
            If |True|, edge length values will not be processed. If |False|,
            edge length values will be processed.
        extract_comment_metadata : boolean, default: |True|
            If |True| (default), any comments that begin with '&' or '&&' will
            be parsed and stored as part of the annotation set of the
            corresponding object (accessible through the ``annotations``
            attribute of the object). This requires that the comment
            contents conform to a particular format (NHX or BEAST: 'field =
            value'). If |False|, then the comments will not be parsed,
            but will be instead stored directly as elements of the ``comments``
            list attribute of the associated object.
        store_tree_weights : boolean, default: |False|
            If |True|, process the tree weight (e.g. "[&W 1/2]") comment
            associated with each tree, if any. Defaults to |False|.
        encode_splits : boolean, default: |False|
            If |True|, split hash bitmasks will be calculated and attached to
            the edges.
        finish_node_fn : function object, default: |None|
            If specified, this function will be applied to each node after
            it has been constructed.
        case_sensitive_taxon_labels : boolean, default: |False|
            If |True|, then taxon labels are case sensitive (e.g., "P.regius"
            and "P.REGIUS" wil be treated as different operation taxonomic
            unit concepts). Otherwise, taxon label intepretation will be made
            without regard for case.
        preserve_underscores : boolean, default: |False|
            If |True|, unquoted underscores in labels will *not* converted to
            spaces. Defaults to |False|: all underscores not protected by
            quotes will be converted to spaces.
        suppress_internal_node_taxa : boolean, default: |True|
            If |False|, internal node labels will be instantantiated into
            |Taxon| objects. If |True|, internal node labels
            will *not* be instantantiated as strings.
        suppress_leaf_node_taxa : boolean, default: |False|
            If |False|, leaf (external) node labels will be instantantiated
            into |Taxon| objects. If |True|, leaff (external) node
            labels will *not* be instantantiated as strings.
        terminating_semicolon_required : boolean, default: |True|
            If |True| [default], then a tree statement that does not end in a
            semi-colon is an error. If |False|, then no error will be raised.
        unconstrained_taxa_accumulation_mode : bool
            If |True|, then no error is raised even if the number of taxon
            names defined exceeds the number of declared taxa (as specified by
            'NTAX'). Defaults to |False|.
        automatically_substitute_missing_taxa_blocks : bool
            If |True| then, if a taxon namespace is linked to by title but is
            not given in the data file, then, if one and exactly one other
            taxon namespace has been given in the data file, this taxon
            namespace will be used; if there are multiple taxon namespaces,
            then if ``automatically_create_missing_taxa_blocks`` is |True| a
            new taxon namespace will be created, otherwise an error is raised.
            Default is |False|: if a taxon namespace is linked to by title but
            is not given in the data file, then an error is raised.
        automatically_create_missing_taxa_blocks : bool
            If |True| then taxon namespaces linked to by title but not given in
            the data file will be automatically created. If |False| taxon
            namespaces linked to by title but not given in the data file will
            result in error.
        exclude_chars : bool
            If |False|, then character data will not be read. Defaults to
            |True|: character data will be read.
        exclude_trees : bool
            If |False|, then tree data will not be read. Defaults to
            |True|: tree data will be read.
        store_ignored_blocks : bool
            If |True|, then ignored NEXUS blocks will be stored under the annotation
            (NOT attribute!) ``ignored_nexus_blocks''.
            To dereference, for e.g.: ``dataset.annotations["ignored_nexus_blocks"]``.
            Defaults to |False|: non-character and tree blocks will not be read.
        attached_taxon_namespace : |TaxonNamespace|
            Unify all operational taxonomic unit definitions in this namespace.
        ignore_unrecognized_keyword_arguments : boolean, default: |False|
            If |True|, then unsupported or unrecognized keyword arguments will
            not result in an error. Default is |False|: unsupported keyword
            arguments will result in an error.
        """

        # base
        ioservice.DataReader.__init__(self)

        # Following are NEXUS-parsing specific (i.e., not used by NEWICK
        # parsers), and need to be removed so as not to cause problems with our
        # keyword validation scheme
        self.exclude_chars = kwargs.pop("exclude_chars", False)
        self.exclude_trees = kwargs.pop("exclude_trees", False)
        self.store_ignored_blocks = kwargs.pop("store_ignored_blocks", False)
        self._data_type = kwargs.pop("data_type", "standard")
        self.attached_taxon_namespace = kwargs.pop("attached_taxon_namespace", None)

        # Following are undocumented for a GOOD reason! They are experimental and subject to change!
        self.unconstrained_taxa_accumulation_mode = kwargs.pop("unconstrained_taxa_accumulation_mode", False)
        self.automatically_create_missing_taxa_blocks = kwargs.pop("automatically_create_missing_taxa_blocks", False)
        self.automatically_substitute_missing_taxa_blocks = kwargs.pop("automatically_substitute_missing_taxa_blocks", False)

        # The following are used by NewickReader in addition to NexusReader, So
        # they are extracted/set here and then forwarded on ...
        # NOTE: ``get`` (not ``pop``) is deliberate -- these keys must remain
        # in ``kwargs`` so NewickReader also sees them.
        self.preserve_underscores = kwargs.get('preserve_underscores', False)
        self.case_sensitive_taxon_labels = kwargs.get('case_sensitive_taxon_labels', False)
        self.extract_comment_metadata = kwargs.get('extract_comment_metadata', True)

        # As above, but the NEXUS format default is different from the NEWICK
        # default, so this rather convoluted approach
        # self.extract_comment_metadata = kwargs.pop('extract_comment_metadata', True)
        # kwargs["extract_comment_metadata"] = self.extract_comment_metadata

        # Create newick handler: consumes the remaining kwargs (tree-parsing
        # options are validated there).
        self.newick_reader = newickreader.NewickReader(**kwargs)

        # Set up parsing meta-variables; populated per-file during _read().
        self._interleave = False
        self._symbols = ""
        self._gap_char = '-'
        self._missing_char = '?'
        self._match_char = frozenset('.')
        self._file_specified_ntax = None
        self._file_specified_nchar = None
        self._nexus_tokenizer = None
        self._taxon_namespace_factory = None
        self._tree_list_factory = None
        self._char_matrix_factory = None
        self._global_annotations_target = None
        self._taxon_namespaces = []
        self._char_matrices = []
        self._tree_lists = []
        self._product = None
        self._ignored_blocks = []
###########################################################################
## Reader Implementation
def _read(self,
stream,
taxon_namespace_factory=None,
tree_list_factory=None,
char_matrix_factory=None,
state_alphabet_factory=None,
global_annotations_target=None):
"""
Instantiates and returns a DataSet object based on the
NEXUS-formatted contents given in the file-like object ``stream``.
"""
self._taxon_namespace_factory = taxon_namespace_factory
self._tree_list_factory = tree_list_factory
if self._tree_list_factory is None:
self.exclude_trees = True
self._char_matrix_factory = char_matrix_factory
if self._char_matrix_factory is None:
self.exclude_chars = True
self._state_alphabet_factory = state_alphabet_factory
self._global_annotations_target = global_annotations_target
self._parse_nexus_stream(stream)
self._product = self.Product(
taxon_namespaces=self._taxon_namespaces,
tree_lists=self._tree_lists,
char_matrices=self._char_matrices)
if self._global_annotations_target is not None and self._ignored_blocks:
a = self._global_annotations_target.annotations.find(name="ignored_nexus_blocks")
if a is None:
self._global_annotations_target.annotations.add_new(
name="ignored_nexus_blocks",
value=self._ignored_blocks,
datatype_hint="xsd:list",
)
else:
a.extend(self._ignored_blocks)
return self._product
###########################################################################
## Tokenizer Control
def create_tokenizer(self, stream, **kwargs):
self._nexus_tokenizer = nexusprocessing.NexusTokenizer(
stream, **kwargs)
return self._nexus_tokenizer
    def set_stream(self, stream):
        # Re-point the already-created tokenizer at a new input stream.
        return self._nexus_tokenizer.set_stream(stream)
###########################################################################
## Book-keeping Control
def _nexus_error(self, message, error_type=None):
if error_type is None:
error_type = NexusReader.NexusReaderError
e = error_type(
message=message,
line_num=self._nexus_tokenizer.token_line_num,
col_num=self._nexus_tokenizer.token_column_num,
stream=self._nexus_tokenizer.src)
return e
def _too_many_taxa_error(self, taxon_namespace, label):
e = NexusReader.TooManyTaxaError(
taxon_namespace=taxon_namespace,
max_taxa=self._file_specified_ntax,
label=label,
line_num=self._nexus_tokenizer.token_line_num,
col_num=self._nexus_tokenizer.token_column_num,
stream=self._nexus_tokenizer.src)
return e
def _undefined_taxon_error(self, taxon_namespace, label):
e = NexusReader.UndefinedTaxonError(
taxon_namespace=taxon_namespace,
label=label,
line_num=self._nexus_tokenizer.token_line_num,
col_num=self._nexus_tokenizer.token_column_num,
stream=self._nexus_tokenizer.src)
return e
def _too_many_characters_error(self, character):
e = NexusReader.TooManyCharactersError(
max_characters=self._file_specified_nchar,
character=character,
line_num=self._nexus_tokenizer.token_line_num,
col_num=self._nexus_tokenizer.token_column_num,
stream=self._nexus_tokenizer.src)
return e
def _debug_print(self, message=None, out=None):
import sys
if out is None:
out = sys.stdout
if message is None:
message = ""
else:
message = " --- ({})".format(message)
out.write("--- Current Position: Line {}, Column {}; Current token [starting at line {} and column {}]: '{}'{}\n".format(
self._nexus_tokenizer.current_line_num,
self._nexus_tokenizer.current_column_num,
self._nexus_tokenizer.token_line_num,
self._nexus_tokenizer.token_column_num,
self._nexus_tokenizer.current_token,
message))
###########################################################################
## Data Management
def _new_taxon_namespace(self, title=None):
if self.attached_taxon_namespace is not None:
return self.attached_taxon_namespace
taxon_namespace = self._taxon_namespace_factory(label=title)
self._taxon_namespaces.append(taxon_namespace)
return taxon_namespace
def _get_taxon_namespace(self, title=None):
if self.attached_taxon_namespace is not None:
return self.attached_taxon_namespace
if title is None:
if len(self._taxon_namespaces) == 0:
return self._new_taxon_namespace(title=title)
elif len(self._taxon_namespaces) == 1:
return self._taxon_namespaces[0]
else:
raise self._nexus_error("Multiple taxa blocks defined: require 'LINK' statement", NexusReader.LinkRequiredError)
else:
found = []
for tns in self._taxon_namespaces:
if tns.label is not None and tns.label.upper() == title.upper():
found.append(tns)
if len(found) == 0:
if self.automatically_substitute_missing_taxa_blocks:
if len(self._taxon_namespaces) == 1:
return self._taxon_namespaces[0]
elif not self.automatically_create_missing_taxa_blocks:
raise self._nexus_error("Taxa block with title '{}' not found, and multiple taxa blocks are defined for this file: unable to automatically substitute".format(title), NexusReader.UndefinedBlockError)
if self.automatically_create_missing_taxa_blocks:
return self._new_taxon_namespace(title=title)
raise self._nexus_error("Taxa block with title '{}' not found".format(title), NexusReader.UndefinedBlockError)
elif len(found) > 1:
raise self._nexus_error("Multiple taxa blocks with title '{}' defined".format(title), NexusReader.MultipleBlockWithSameTitleError)
return found[0]
def _get_taxon_symbol_mapper(self, taxon_namespace, enable_lookup_by_taxon_number=True):
taxon_symbol_mapper = nexusprocessing.NexusTaxonSymbolMapper(
taxon_namespace=taxon_namespace,
enable_lookup_by_taxon_number=enable_lookup_by_taxon_number,
case_sensitive=self.case_sensitive_taxon_labels)
return taxon_symbol_mapper
def _new_char_matrix(self, data_type, taxon_namespace, title=None):
# if data_type is None:
# data_type = "standard"
char_matrix = self._char_matrix_factory(
data_type,
taxon_namespace=taxon_namespace,
label=title)
self._char_matrices.append(char_matrix)
return char_matrix
    def _new_state_alphabet(self, *args, **kwargs):
        # Thin pass-through to the state-alphabet factory supplied to _read().
        return self._state_alphabet_factory(*args, **kwargs)
def _get_char_matrix(self, title=None):
if title is None:
if len(self._char_matrices) == 1:
return self._char_matrices[0]
elif len(self._char_matrices) == 0:
raise self._nexus_error("No character matrices defined", NexusReader.NoCharacterBlocksFoundError)
else:
raise self._nexus_error("Multiple character matrices defined: require 'LINK' statement", NexusReader.LinkRequiredError)
else:
found = []
for cm in self._char_matrices:
if cm.label.upper() == title.upper():
found.append(cm)
if len(found) == 0:
raise self._nexus_error("Character block with title '{}' not found".format(title), NexusReader.UndefinedBlockError)
elif len(found) > 1:
raise self._nexus_error("Multiple character blocks with title '{}' defined".format(title), NexusReader.MultipleBlockWithSameTitleError)
return found[0]
def _new_tree_list(self, taxon_namespace, title=None):
tree_list = self._tree_list_factory(
taxon_namespace=taxon_namespace,
label=title)
self._tree_lists.append(tree_list)
return tree_list
    def _get_tree_list(self, title=None):
        """Return the tree list matching ``title``, or the only one defined
        when no title is given."""
        if title is None:
            if len(self._tree_lists) == 1:
                return self._tree_lists[0]
            elif len(self._tree_lists) == 0:
                # NOTE(review): raises NoCharacterBlocksFoundError for a
                # missing *tree* block -- looks like a copy-paste from
                # _get_char_matrix; callers may rely on it, so left as-is.
                raise self._nexus_error("No tree blocks defined", NexusReader.NoCharacterBlocksFoundError)
            else:
                raise self._nexus_error("Multiple tree blocks defined: require 'LINK' statement", NexusReader.LinkRequiredError)
        else:
            found = []
            for tlst in self._tree_lists:
                if tlst.label.upper() == title.upper():
                    found.append(tlst)
            if len(found) == 0:
                raise self._nexus_error("Trees block with title '{}' not found".format(title), NexusReader.UndefinedBlockError)
            elif len(found) > 1:
                raise self._nexus_error("Multiple trees blocks with title '{}' defined".format(title), NexusReader.MultipleBlockWithSameTitleError)
            return found[0]
###########################################################################
## Main Stream Parse Driver
    def _parse_nexus_stream(self, stream):
        "Main file parsing driver."
        # Reuse an existing tokenizer if one was created via create_tokenizer().
        if self._nexus_tokenizer is None:
            self.create_tokenizer(stream,
                preserve_unquoted_underscores=self.preserve_underscores)
        else:
            self._nexus_tokenizer.set_stream(stream)
        token = self._nexus_tokenizer.next_token()
        if token.upper() != "#NEXUS":
            raise self._nexus_error("Expecting '#NEXUS', but found '{}'".format(token),
                    NexusReader.NotNexusFileError)
        while not self._nexus_tokenizer.is_eof():
            # Scan forward to the next BEGIN keyword (start of a block).
            token = self._nexus_tokenizer.next_token_ucase()
            while token != None and token != 'BEGIN' and not self._nexus_tokenizer.is_eof():
                token = self._nexus_tokenizer.next_token_ucase()
            # Attach any comments seen so far to the file-level target.
            self._nexus_tokenizer.process_and_clear_comments_for_item(
                    self._global_annotations_target,
                    self.extract_comment_metadata)
            # The token after BEGIN names the block type.
            token = self._nexus_tokenizer.next_token_ucase()
            if token == 'TAXA':
                self._parse_taxa_block()
            elif token == 'CHARACTERS' or token == 'DATA':
                self._parse_characters_data_block()
            elif token == 'TREES':
                self._parse_trees_block()
            elif token in ['SETS', 'ASSUMPTIONS', 'CODONS']:
                # These blocks only matter when character data is being kept.
                if not self.exclude_chars:
                    self._nexus_tokenizer.skip_to_semicolon() # move past BEGIN command
                    link_title = None
                    block_title = None
                    while not (token == 'END' or token == 'ENDBLOCK') \
                            and not self._nexus_tokenizer.is_eof() \
                            and not token==None:
                        token = self._nexus_tokenizer.next_token_ucase()
                        if token == 'TITLE':
                            block_title = self._parse_title_statement()
                        elif token == "LINK":
                            link_title = self._parse_link_statement().get('characters')
                        elif token == 'CHARSET':
                            self._parse_charset_statement(block_title=block_title, link_title=link_title)
                        elif token == 'BEGIN':
                            raise self._nexus_error("'BEGIN' found without completion of previous block",
                                    NexusReader.IncompleteBlockError)
                    self._nexus_tokenizer.skip_to_semicolon() # move past END command
            elif token == 'BEGIN':
                raise self._nexus_error("'BEGIN' found without completion of previous block",
                        NexusReader.IncompleteBlockError)
            else:
                # unknown block: either capture it verbatim or skip it.
                if token is not None and self.store_ignored_blocks:
                    b = self._read_block_without_processing(token=token)
                    self._ignored_blocks.append(b)
                else:
                    token = self._consume_to_end_of_block(token)
###########################################################################
## TAXA BLOCK
    def _parse_taxa_block(self):
        """Parse a TAXA block: TITLE, DIMENSIONS, and TAXLABELS statements.

        Assumes the tokenizer is positioned just after the 'TAXA' keyword.
        """
        token = ''
        # EOF inside a TAXA block is a malformed file, so disallow it here.
        self._nexus_tokenizer.allow_eof = False
        self._nexus_tokenizer.skip_to_semicolon() # move past BEGIN statement
        title = None
        taxon_namespace = None
        #while not (token == 'END' or token == 'ENDBLOCK') \
        #        and not self._nexus_tokenizer.is_eof() \
        #        and not token==None:
        while not (token == 'END' or token == 'ENDBLOCK'):
            token = self._nexus_tokenizer.next_token_ucase()
            # NOTE(review): these are sequential ``if``s, not elif: after the
            # TITLE branch ``token`` holds the title string, so a block
            # titled "DIMENSIONS" or "TAXLABELS" would also trigger the
            # following branch -- presumably never happens in practice.
            if token == "TITLE":
                token = self._parse_title_statement()
                taxon_namespace = self._new_taxon_namespace(token)
            if token == 'DIMENSIONS':
                self._parse_dimensions_statement()
            if token == 'TAXLABELS':
                if taxon_namespace is None:
                    taxon_namespace = self._new_taxon_namespace()
                self._nexus_tokenizer.process_and_clear_comments_for_item(
                        self._global_annotations_target,
                        self.extract_comment_metadata)
                self._parse_taxlabels_statement(taxon_namespace)
        self._nexus_tokenizer.skip_to_semicolon() # move past END statement
        self._nexus_tokenizer.allow_eof = True
def _get_taxon(self, taxon_namespace, label):
if not self._file_specified_ntax or len(taxon_namespace) < self._file_specified_ntax:
taxon = taxon_namespace.require_taxon(label=label,
is_case_sensitive=self.case_sensitive_taxon_labels)
else:
taxon = taxon_namespace.get_taxon(label=label,
is_case_sensitive=self.case_sensitive_taxon_labels)
if taxon is None:
raise self._too_many_taxa_error(taxon_namespace=taxon_namespace, label=label)
return taxon
    def _parse_taxlabels_statement(self, taxon_namespace=None):
        """
        Processes a TAXLABELS command. Assumes that the file reader is
        positioned right after the "TAXLABELS" token in a TAXLABELS command.
        """
        if taxon_namespace is None:
            taxon_namespace = self._get_taxon_namespace()
        token = self._nexus_tokenizer.next_token()
        # Construct label lookup set
        # The get_taxon call is expensive for large taxon namespaces as it requires
        # a linear search. This causes significant performance penalties for loading
        # very large trees into an empty taxon namespace as each new taxon requires
        # a worst case search of the existing namespace before it can be inserted.
        # To alleviate this, we build a temporary one-time set of all the labels
        # in the taxon namespace. Now we can determine in constant-time whether
        # a label token corresponds to a new taxon that requires insertion,
        # or if an existing taxon can be fetched with get_taxon.
        label_set = set([])
        for taxon in taxon_namespace._taxa:
            if taxon_namespace.is_case_sensitive:
                label_set.add(taxon.label)
            else:
                label_set.add(taxon.lower_cased_label)
        while token != ';':
            label = token
            # Convert the token to the appropriate case to check against label set
            if taxon_namespace.is_case_sensitive:
                check_label = label
            else:
                check_label = label.lower()
            if check_label in label_set:
                taxon = taxon_namespace.get_taxon(label=label)
            else:
                # New label: enforce the NTAX cap unless a namespace is attached
                # or taxa accumulation is explicitly unconstrained
                if len(taxon_namespace) >= self._file_specified_ntax and not self.attached_taxon_namespace and not self.unconstrained_taxa_accumulation_mode:
                    raise self._too_many_taxa_error(taxon_namespace=taxon_namespace, label=label)
                taxon = taxon_namespace.new_taxon(label=label)
                # Add the new label to the label lookup set too
                if taxon_namespace.is_case_sensitive:
                    label_set.add(taxon.label)
                else:
                    label_set.add(taxon.lower_cased_label)
            token = self._nexus_tokenizer.next_token()
            # Attach any pending comments/metadata to the taxon just processed
            self._nexus_tokenizer.process_and_clear_comments_for_item(taxon,
                    self.extract_comment_metadata)
###########################################################################
## LINK/TITLE PARSERS (How Mesquite handles multiple TAXA blocks)
def _parse_title_statement(self):
"""
Processes a MESQUITE 'TITLE' statement.
Assumes current token is 'TITLE'
"""
if self._nexus_tokenizer.cast_current_token_to_ucase() != "TITLE":
raise self._nexus_error("Expecting 'TITLE' token, but instead found '{}'".format(self._nexus_tokenizer.cast_current_token_to_ucase()))
title = self._nexus_tokenizer.require_next_token()
sc = self._nexus_tokenizer.require_next_token()
if sc != ";":
raise self._nexus_error("Expecting ';' token, but instead found '{}'".format(sc))
return title
def _parse_link_statement(self):
"""
Processes a MESQUITE 'LINK' statement.
"""
# TODO: this is now pretty ugly
# need to refactor with more abstraction
links = {}
token = self._nexus_tokenizer.next_token_ucase()
while token != ';':
if token == 'TAXA':
token = self._nexus_tokenizer.next_token()
if token != "=":
raise self._nexus_error("expecting '=' after link taxa")
token = self._nexus_tokenizer.next_token()
links['taxa'] = token
token = self._nexus_tokenizer.next_token()
if token == 'CHARACTERS':
token = self._nexus_tokenizer.next_token()
if token != "=":
raise self._nexus_error("expecting '=' after link characters")
token = self._nexus_tokenizer.next_token()
links['characters'] = token
token = self._nexus_tokenizer.next_token()
if token != ";":
self._nexus_tokenizer.skip_to_semicolon()
return links
###########################################################################
## CHARACTER/DATA BLOCK PARSERS AND SUPPORT
    def _parse_characters_data_block(self):
        """
        Parses a CHARACTERS or DATA block. Assumes the current token is
        "CHARACTERS" or "DATA" (i.e., just past "BEGIN"). Dispatches the
        TITLE, LINK, DIMENSIONS, FORMAT, and MATRIX statements within the
        block; skips the whole block if ``self.exclude_chars`` is set.
        """
        token = self._nexus_tokenizer.cast_current_token_to_ucase()
        if token != "CHARACTERS" and token != "DATA":
            raise self._nexus_error("Expecting 'CHARACTERS' or 'DATA' token, but instead found '{}'".format(token))
        if self.exclude_chars:
            # Character data is to be skipped entirely
            self._consume_to_end_of_block(self._nexus_tokenizer.current_token)
            return
        self._nexus_tokenizer.skip_to_semicolon() # move past BEGIN command
        block_title = None
        link_title = None
        self._data_type = "standard" # set as default
        while (token != 'END'
                and token != 'ENDBLOCK'
                and not self._nexus_tokenizer.is_eof()
                and not token==None):
            token = self._nexus_tokenizer.next_token_ucase()
            if token == 'TITLE':
                block_title = self._parse_title_statement()
            elif token == "LINK":
                link_title = self._parse_link_statement().get('taxa')
            elif token == 'DIMENSIONS':
                self._parse_dimensions_statement()
            elif token == 'FORMAT':
                self._parse_format_statement()
            elif token == 'MATRIX':
                self._parse_matrix_statement(block_title=block_title, link_title=link_title)
            elif token == 'BEGIN':
                raise self._nexus_error("'BEGIN' found without completion of previous block",
                        NexusReader.IncompleteBlockError)
        # token = self._nexus_tokenizer.cast_current_token_to_ucase()
        self._nexus_tokenizer.skip_to_semicolon() # move past END command
def _build_state_alphabet(self, char_block, symbols):
if self._gap_char and self._gap_char in symbols:
symbols = [s for s in symbols if s != self._gap_char]
sa = self._new_state_alphabet(
fundamental_states=symbols,
no_data_symbol=self._missing_char,
gap_symbol=self._gap_char,
case_sensitive=False)
char_block.state_alphabets = [sa]
char_block.default_state_alphabet = char_block.state_alphabets[0]
def _parse_format_statement(self):
"""
Processes a FORMAT command. Assumes that the file reader is
positioned right after the "FORMAT" token in a FORMAT command.
"""
token = self._nexus_tokenizer.require_next_token_ucase()
while token != ';':
if token == 'DATATYPE':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == '=':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == "DNA" or token == "NUCLEOTIDES":
self._data_type = "dna"
elif token == "RNA":
self._data_type = "rna"
elif token == "NUCLEOTIDE":
self._data_type = "nucleotide"
elif token == "PROTEIN":
self._data_type = "protein"
elif token == "CONTINUOUS":
self._data_type = "continuous"
else:
# defaults to STANDARD elif token == "STANDARD":
self._data_type = "standard"
self._symbols = "0123456789"
else:
raise self._nexus_error("Expecting '=' after DATATYPE keyword")
token = self._nexus_tokenizer.require_next_token_ucase()
elif token == 'SYMBOLS':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == '=':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == '"':
self._symbols = ""
token = self._nexus_tokenizer.require_next_token_ucase()
while token != '"':
if token not in self._symbols:
self._symbols = self._symbols + token
token = self._nexus_tokenizer.require_next_token_ucase()
else:
raise self._nexus_error("Expecting '\"' before beginning SYMBOLS list")
else:
raise self._nexus_error("Expecting '=' after SYMBOLS keyword")
token = self._nexus_tokenizer.require_next_token_ucase()
elif token == 'GAP':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == '=':
token = self._nexus_tokenizer.require_next_token_ucase()
self._gap_char = token
else:
raise self._nexus_error("Expecting '=' after GAP keyword")
token = self._nexus_tokenizer.require_next_token_ucase()
elif token == 'INTERLEAVE':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == '=':
token = self._nexus_tokenizer.require_next_token_ucase()
if token.startswith("N"):
self._interleave = False
else:
self._interleave = True
token = self._nexus_tokenizer.require_next_token_ucase()
else:
self._interleave = True
elif token == 'MISSING':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == '=':
token = self._nexus_tokenizer.require_next_token_ucase()
self._missing_char = token
else:
raise self._nexus_error("Expecting '=' after MISSING keyword")
token = self._nexus_tokenizer.require_next_token_ucase()
elif token == 'MATCHCHAR':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == '=':
token = self._nexus_tokenizer.require_next_token_ucase()
self._match_char = frozenset([token, token.lower()])
else:
raise self._nexus_error("Expecting '=' after MISSING keyword")
token = self._nexus_tokenizer.require_next_token_ucase()
elif token == 'BEGIN':
raise self._nexus_error("'BEGIN' found without completion of previous block",
NexusReader.IncompleteBlockError)
else:
token = self._nexus_tokenizer.require_next_token_ucase()
def _parse_dimensions_statement(self):
"""
Processes a DIMENSIONS command. Assumes that the file reader is
positioned right after the "DIMENSIONS" token in a DIMENSIONS command.
"""
token = self._nexus_tokenizer.require_next_token_ucase()
while token != ';':
if token == 'NTAX':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == '=':
token = self._nexus_tokenizer.require_next_token_ucase()
if token.isdigit():
self._file_specified_ntax = int(token)
else:
raise self._nexus_error('Expecting numeric value for NTAX')
else:
raise self._nexus_error("Expecting '=' after NTAX keyword")
elif token == 'NCHAR':
token = self._nexus_tokenizer.require_next_token_ucase()
if token == '=':
token = self._nexus_tokenizer.require_next_token_ucase()
if token.isdigit():
self._file_specified_nchar = int(token)
else:
raise self._nexus_error("Expecting numeric value for NCHAR")
else:
raise self._nexus_error("Expecting '=' after NCHAR keyword")
elif token == 'BEGIN':
raise self._nexus_error("'BEGIN' found without completion of previous block",
NexusReader.IncompleteBlockError)
token = self._nexus_tokenizer.require_next_token_ucase()
def _parse_matrix_statement(self, block_title=None, link_title=None):
"""
Processes a MATRIX command. Assumes that the file reader
is positioned right after the "MATRIX" token in a MATRIX command,
and that NTAX and NCHAR have been specified accurately.
"""
if not self._file_specified_ntax:
raise self._nexus_error('NTAX must be defined by DIMENSIONS command to non-zero value before MATRIX command')
elif not self._file_specified_nchar:
raise self._nexus_error('NCHAR must be defined by DIMENSIONS command to non-zero value before MATRIX command')
taxon_namespace = self._get_taxon_namespace(link_title)
char_block = self._new_char_matrix(
self._data_type,
taxon_namespace=taxon_namespace,
title=block_title)
if self._data_type == "continuous":
self._process_continuous_matrix_data(char_block)
else:
self._process_discrete_matrix_data(char_block)
    def _process_continuous_matrix_data(self, char_block):
        """
        Reads the rows of a continuous-valued MATRIX statement into
        ``char_block``, dispatching per-row value reading to
        ``_read_continuous_character_values``. Each row starts with a taxon
        label token and ends either at EOL (interleaved) or after NCHAR
        values (non-interleaved).
        """
        taxon_namespace = char_block.taxon_namespace
        token = self._nexus_tokenizer.next_token()
        first_sequence_defined = None
        if self._interleave:
            try:
                while token != ";" and not self._nexus_tokenizer.is_eof():
                    taxon = self._get_taxon(taxon_namespace=taxon_namespace, label=token)
                    self._read_continuous_character_values(char_block[taxon])
                    # if first_sequence_defined is None:
                    #     first_sequence_defined = char_block[taxon]
                    token = self._nexus_tokenizer.next_token()
            except NexusReader.BlockTerminatedException:
                # terminating ';' of the MATRIX command was reached mid-row
                token = self._nexus_tokenizer.next_token()
        else:
            while token != ';' and not self._nexus_tokenizer.is_eof():
                taxon = self._get_taxon(taxon_namespace=taxon_namespace, label=token)
                self._read_continuous_character_values(char_block[taxon])
                # if first_sequence_defined is None:
                #     first_sequence_defined = char_block[taxon]
                if len(char_block[taxon]) < self._file_specified_nchar:
                    raise self._nexus_error("Insufficient characters given for taxon '{}': expecting {} but only found {} ('{}')".format(taxon.label, self._file_specified_nchar, len(char_block[taxon]), char_block[taxon].symbols_as_string()))
                token = self._nexus_tokenizer.next_token()
        # if self._interleave:
        #     raise NotImplementedError("Continuous interleaved characters in NEXUS schema not yet supported")
        # taxon_namespace = char_block.taxon_namespace
        # token = self._nexus_tokenizer.next_token()
        # while token != ';' and not self._nexus_tokenizer.is_eof():
        #     taxon = self._get_taxon(taxon_namespace=taxon_namespace, label=token)
        #     while len(char_block[taxon]) < self._file_specified_nchar and not self._nexus_tokenizer.is_eof():
        #         # char_group = self._nexus_tokenizer.next_token(ignore_punctuation="-+")
        #         char_group = self._nexus_tokenizer.next_token()
        #         char_block[taxon].append(dataobject.CharacterDataCell(value=float(char_group)))
        #     if len(char_block[taxon]) < self._file_specified_nchar:
        #         raise self._nexus_error("Insufficient characters given for taxon '%s': expecting %d but only found %d ('%s')" \
        #             % (taxon.label, self._file_specified_nchar, len(char_block[taxon]), char_block[taxon].symbols_as_string()))
        #     token = self._nexus_tokenizer.next_token()
    def _process_discrete_matrix_data(self, char_block):
        """
        Reads the rows of a discrete-state MATRIX statement into
        ``char_block``. The first fully-read sequence is remembered so that
        MATCHCHAR symbols in subsequent rows can be resolved against it.
        """
        if self._data_type == "standard":
            # Standard data builds its alphabet from the FORMAT-declared symbols
            self._build_state_alphabet(char_block, self._symbols)
        taxon_namespace = char_block.taxon_namespace
        token = self._nexus_tokenizer.next_token()
        state_alphabet = char_block.default_state_alphabet
        first_sequence_defined = None
        if self._interleave:
            try:
                while token != ";" and not self._nexus_tokenizer.is_eof():
                    taxon = self._get_taxon(taxon_namespace=taxon_namespace, label=token)
                    self._read_character_states(char_block[taxon], state_alphabet, first_sequence_defined)
                    if first_sequence_defined is None:
                        first_sequence_defined = char_block[taxon]
                    token = self._nexus_tokenizer.next_token()
            except NexusReader.BlockTerminatedException:
                # terminating ';' of the MATRIX command was reached mid-row
                token = self._nexus_tokenizer.next_token()
        else:
            while token != ';' and not self._nexus_tokenizer.is_eof():
                taxon = self._get_taxon(taxon_namespace=taxon_namespace, label=token)
                self._read_character_states(char_block[taxon], state_alphabet, first_sequence_defined)
                if first_sequence_defined is None:
                    first_sequence_defined = char_block[taxon]
                if len(char_block[taxon]) < self._file_specified_nchar:
                    raise self._nexus_error("Insufficient characters given for taxon '%s': expecting %d but only found %d ('%s')" \
                            % (taxon.label, self._file_specified_nchar, len(char_block[taxon]), char_block[taxon].symbols_as_string()))
                token = self._nexus_tokenizer.next_token()
def _get_state_for_multistate_tokens(self,
state_char_seq,
multistate_type,
state_alphabet):
try:
state = state_alphabet.match_state(state_char_seq,
state_denomination=multistate_type)
except KeyError:
try:
if multistate_type == state_alphabet.AMBIGUOUS_STATE:
sae = state_alphabet.new_ambiguous_state(
symbol=None,
member_state_symbols=state_char_seq)
else:
sae = state_alphabet.new_polymorphic_state(
symbol=None,
member_state_symbols=state_char_seq)
except KeyError:
raise self._nexus_error("Unrecognized state symbols encountered in multistate sequence: '{}'".format(state_char_seq))
else:
return sae
else:
return state
###########################################################################
## TREE / TREE BLOCK PARSERS
    def _parse_tree_statement(self, tree_factory, taxon_symbol_mapper):
        """
        Processes a TREE command. Assumes that the file reader is
        positioned right after the "TREE" token in a TREE command.
        Calls on the NewickStatementParser of the trees module.
        """
        token = self._nexus_tokenizer.next_token()
        if token == '*':
            # '*' marks the default tree; the marker itself is skipped
            token = self._nexus_tokenizer.next_token()
        tree_name = token
        token = self._nexus_tokenizer.next_token()
        # Comments captured before '=' are associated with the tree statement
        pre_tree_comments = self._nexus_tokenizer.pull_captured_comments()
        if token != '=':
            raise self._nexus_error("Expecting '=' in definition of Tree '%s' but found '%s'" % (tree_name, token))
        tree_comments = self._nexus_tokenizer.pull_captured_comments()
        # advance to '('; comments will be processed by newick reader
        self._nexus_tokenizer.next_token()
        tree = self._build_tree_from_newick_tree_string(tree_factory, taxon_symbol_mapper)
        tree.label = tree_name
        nexusprocessing.process_comments_for_item(tree, pre_tree_comments, self.extract_comment_metadata)
        nexusprocessing.process_comments_for_item(tree, tree_comments, self.extract_comment_metadata)
        # if self.extract_comment_metadata:
        #     annotations = nexustokenizer.parse_comment_metadata(tree_comments)
        #     for annote in annotations:
        #         tree.annotations.add(annote)
        #     if pre_tree_metadata_comments:
        #         pre_tree_annotations = nexustokenizer.parse_comment_metadata(pre_tree_metadata_comments)
        #         for annote in pre_annotations:
        #             tree.annotations.add(annote)
        # if tree_comments is not None and len(tree_comments) > 0:
        #     tree.comments.extend(tree_comments)
        # if self._nexus_tokenizer.current_token != ';':
        #     self._nexus_tokenizer.skip_to_semicolon()
        return tree
def _build_tree_from_newick_tree_string(self, tree_factory, taxon_symbol_mapper):
tree = self.newick_reader._parse_tree_statement(
nexus_tokenizer=self._nexus_tokenizer,
tree_factory=tree_factory,
taxon_symbol_map_fn=taxon_symbol_mapper.require_taxon_for_symbol)
return tree
    def _parse_translate_statement(self, taxon_namespace, taxon_symbol_mapper=None):
        """
        Processes a TRANSLATE command. Assumes that the file reader is
        positioned right after the "TRANSLATE" token in a TRANSLATE command.

        Returns the ``taxon_symbol_mapper`` populated with the
        token-to-taxon translations.
        """
        token = self._nexus_tokenizer.current_token
        if taxon_symbol_mapper is None:
            taxon_symbol_mapper = self._get_taxon_symbol_mapper(taxon_namespace=taxon_namespace)
        else:
            assert taxon_symbol_mapper.taxon_namespace is taxon_namespace
        if self._file_specified_ntax is None:
            # Not yet parsed TAXA block: NEXUS file without TAXA block
            # Badly-formed NEXUS file, yet widely-found in the wild
            # Override namespace modification lock
            taxon_namespace.is_mutable = True
        while True:
            translation_token = self._nexus_tokenizer.next_token()
            # a quoted ';' is a legitimate translation token, not a terminator
            if translation_token == ";" and not self._nexus_tokenizer.is_token_quoted:
                raise self._nexus_error("Expecting translation token but found ';' instead")
            translation_label = self._nexus_tokenizer.next_token()
            try:
                taxon = taxon_namespace.require_taxon(label=translation_label)
            except error.ImmutableTaxonNamespaceError:
                # Translation refers to a taxon not declared in the TAXA block
                exc = self._undefined_taxon_error(taxon_namespace=taxon_namespace, label=translation_label)
                exc.__context__ = None # Python 3.0, 3.1, 3.2
                exc.__cause__ = None # Python 3.3, 3.4
                raise exc
            taxon_symbol_mapper.add_translate_token(translation_token, taxon)
            token = self._nexus_tokenizer.next_token() # ","
            if (not token) or (token == ';'):
                break
            if token != ',':
                raise self._nexus_error("Expecting ',' in TRANSLATE statement after definition for %s = '%s', but found '%s' instead." % (translation_token, translation_label, token))
        return taxon_symbol_mapper
    def _parse_trees_block(self):
        """
        Parses a TREES block, handling LINK, TITLE, TRANSLATE, and TREE
        statements.

        Expectations:

            - current token: "TREES" [part of "BEGIN TREES"]
        """
        token = self._nexus_tokenizer.cast_current_token_to_ucase()
        if token != "TREES":
            raise self._nexus_error("Expecting 'TREES' token, but instead found '{}'".format(token))
        if self.exclude_trees:
            # Trees are to be skipped entirely
            self._consume_to_end_of_block(self._nexus_tokenizer.current_token)
            return
        self._nexus_tokenizer.skip_to_semicolon() # move past "BEGIN TREES" command
        link_title = None
        taxon_namespace = None
        taxon_symbol_mapper = None
        trees_block = None
        block_title = None
        # while ((not self._nexus_tokenizer.is_eof())
        #         and self._nexus_tokenizer.current_token is not None
        #         and self._nexus_tokenixer.current_token != 'END'
        #         and self._nexus_tokenixer.current_token != 'ENDBLOCK'):
        while ((not self._nexus_tokenizer.is_eof())
                and token is not None
                and token != 'END'
                and token != 'ENDBLOCK'):
            token = self._nexus_tokenizer.next_token_ucase()
            if token == 'LINK':
                link_title = self._parse_link_statement().get("taxa")
            elif token == 'TITLE':
                block_title = self._parse_title_statement()
                token = "" # clear; repopulate at start of loop
            elif token == 'TRANSLATE':
                if taxon_namespace is None:
                    taxon_namespace = self._get_taxon_namespace(link_title)
                taxon_symbol_mapper = self._parse_translate_statement(taxon_namespace)
                token = "" # clear; repopulate at start of loop
            elif token == 'TREE':
                # Lazily resolve the namespace/mapper on the first TREE statement
                if taxon_namespace is None:
                    taxon_namespace = self._get_taxon_namespace(link_title)
                if taxon_symbol_mapper is None:
                    taxon_symbol_mapper = self._get_taxon_symbol_mapper(taxon_namespace=taxon_namespace)
                pre_tree_comments = self._nexus_tokenizer.pull_captured_comments()
                if trees_block is None:
                    trees_block = self._new_tree_list(taxon_namespace=taxon_namespace, title=block_title)
                # All comments leading up to the first 'TREE' statement assumed
                # to belong to the TreeList corresponding to the TREES block
                nexusprocessing.process_comments_for_item(
                        trees_block,
                        pre_tree_comments,
                        self.extract_comment_metadata)
                tree_factory = trees_block.new_tree
                while True:
                    ## After the following, the current token
                    ## will be the token immediately following
                    ## the terminating semi-colon of a tree
                    ## statement. Typically, this will be
                    ## 'TREE' if there is another tree, or
                    ## 'END'/'ENDBLOCK'.
                    tree = self._parse_tree_statement(
                            tree_factory=tree_factory,
                            taxon_symbol_mapper=taxon_symbol_mapper)
                    if self._nexus_tokenizer.is_eof() or not self._nexus_tokenizer.current_token:
                        break
                    if self._nexus_tokenizer.cast_current_token_to_ucase() != "TREE":
                        token = self._nexus_tokenizer.current_token
                        break
            elif token == 'BEGIN':
                raise self._nexus_error("'BEGIN' found without completion of previous block",
                        NexusReader.IncompleteBlockError)
        self._nexus_tokenizer.skip_to_semicolon() # move past END command
def _parse_charset_statement(self, block_title=None, link_title=None):
"""
Parses a character set description. Assumes token stream is positioned right after 'charset' command.
"""
char_matrix = self._get_char_matrix(title=link_title)
keyword = self._nexus_tokenizer.current_token
token = self._nexus_tokenizer.next_token()
if self._nexus_tokenizer.is_eof() or not token:
raise self._nexus_error('Unexpected end of file or null token')
else:
if not token:
raise self._nexus_error("Unexpected end of file or null token")
else:
charset_name = token
token = self._nexus_tokenizer.next_token()
if not token:
raise self._nexus_error("Unexpected end of file or null token")
elif token != '=':
raise self._nexus_error('Expecting "=" after character set name "%s", but instead found "%s"' % (charset_name, token))
else:
positions = self._parse_positions(adjust_to_zero_based=True)
char_matrix.new_character_subset(charset_name, positions)
    def _parse_positions(self, adjust_to_zero_based=True, verify=True):
        """
        Parses a character position list. Expects next character read to be the first item in a position list.

        Supports single positions, ranges ("a-b"), open-ended ranges ("a-."),
        stepped ranges ("a-b\\s"), and the keyword "all". Returns a sorted
        list of unique positions, shifted to 0-based indexing if
        ``adjust_to_zero_based`` is true.
        """
        positions = []
        # hyphens_as_tokens = self._nexus_tokenizer.hyphens_as_tokens
        # self._nexus_tokenizer.hyphens_as_tokens = True
        # '-' must be surfaced as its own token so ranges can be recognized
        self._nexus_tokenizer.set_hyphens_as_captured_delimiters(True)
        token = self._nexus_tokenizer.next_token()
        max_positions = self._file_specified_nchar
        if self._nexus_tokenizer.is_eof() or not token:
            raise self._nexus_error('Unexpected end of file or null token')
        while token != ';' and token != ',' and not self._nexus_tokenizer.is_eof():
            if not token:
                break
            if token.upper() == 'ALL':
                positions = range(1, max_positions + 1)
                break
            elif token.isdigit():
                start = int(token)
                token = self._nexus_tokenizer.next_token()
                if token:
                    if token == ',' or token.isdigit() or token == ';':
                        # single position; the just-read token is re-examined by the loop
                        positions.append(start)
                    elif token == '-':
                        # range: start-end, optionally followed by a step
                        token = self._nexus_tokenizer.next_token()
                        if token:
                            if token.isdigit() or token == '.':
                                if token == '.':
                                    # '.' means "through the last character"
                                    end = max_positions
                                    #token = self._nexus_tokenizer.next_token()
                                else:
                                    end = int(token)
                                    #token = self._nexus_tokenizer.next_token()
                                token = self._nexus_tokenizer.next_token()
                                if token:
                                    if token == '\\' or token == '/': # (NEXUS standard only accepts '\')
                                        token = self._nexus_tokenizer.next_token()
                                        if token:
                                            if token.isdigit():
                                                step = int(token)
                                                #token = self._nexus_tokenizer.next_token()
                                            else:
                                                raise self._nexus_error('Expecting digit but found "%s".' % (token))
                                        else:
                                            raise self._nexus_error(r'Expecting other tokens after "\", but no more found.')
                                        token = self._nexus_tokenizer.next_token()
                                    else:
                                        step = 1
                                else:
                                    step = 1
                                for q in range(start, end+1, step):
                                    if q <= max_positions:
                                        positions.append(q)
                            else:
                                raise self._nexus_error('Expecting digit or ".", but found "%s".' % (token))
                        else:
                            raise self._nexus_error('Expecting other tokens after "-", but no more found.')
                    else:
                        raise self._nexus_error('Expecting digit or "all", but found "%s".' % (token))
                else:
                    positions.append(start)
        self._nexus_tokenizer.set_hyphens_as_captured_delimiters(False)
        positions = list(set(positions))
        positions.sort()
        if verify:
            for position in positions:
                if position > max_positions:
                    raise self._nexus_error("Specified position %d, but maximum position is %d" % (position, max_positions))
        if adjust_to_zero_based:
            positions = [position - 1 for position in positions]
        return positions # make unique and return
def _consume_to_end_of_block(self, token=None):
if token:
token = token.upper()
else:
token = "DUMMY"
while not (token == 'END' or token == 'ENDBLOCK') \
and not self._nexus_tokenizer.is_eof() \
and not token==None:
self._nexus_tokenizer.skip_to_semicolon()
token = self._nexus_tokenizer.next_token_ucase()
return token
def _read_block_without_processing(self, token=None):
# used for unknown blocks we want to save
# NOT (really) TESTED
# Everybody else except Jeet: (REALLY) DO NOT USE!
# Jeet: SORTA DO NOT USE WITHOUT MORE TESTING
if token:
token = token.upper()
block = ["BEGIN", token]
old_uncaptured_delimiters = self._nexus_tokenizer.uncaptured_delimiters
old_captured_delimiters = self._nexus_tokenizer.captured_delimiters
to_switch = "\n\r"
for ch in to_switch:
self._nexus_tokenizer.uncaptured_delimiters.discard(ch)
self._nexus_tokenizer.captured_delimiters.add(ch)
while not (token == 'END' or token == 'ENDBLOCK') \
and not self._nexus_tokenizer.is_eof() \
and not token==None:
token = self._nexus_tokenizer.require_next_token()
uctoken = token.upper()
if uctoken == "END" or uctoken == "ENDBLOCK":
token = uctoken
block.append(token)
self._nexus_tokenizer.uncaptured_delimiters = old_uncaptured_delimiters
self._nexus_tokenizer.captured_delimiters = old_captured_delimiters
self._nexus_tokenizer.skip_to_semicolon() # move past end
block.append(";")
return " ".join(block)
def _read_character_states(self,
character_data_vector,
state_alphabet,
first_sequence_defined,
):
"""
Reads character sequence data substatement until the number of
character states read is equal to ``self._file_specified_nchar`` (with
multi-state characters, such as '(AG)' counting as a single
state) or, if ``self._interleave`` is |True|, until an EOL is
reached.
Given a sequence of characters, with ambiguities denoted by
`{<STATES>}`, this returns a list of state alphabet elements.
For example, the following sequence:
"ACTG(AC)GGT(CGG)(CG)GG"
will result in a list such as:
[<A>, <C>, <T>, <G>, <AC>, <G>, <G>, <T>, <CGG>, <CG>, <G>, <G>]
where `<.>` is a StateIdentity object with the characters within the
brackets as symbol(s).
"""
if self._interleave:
self._nexus_tokenizer.set_capture_eol(True)
states_to_add = []
while len(character_data_vector) + len(states_to_add) < self._file_specified_nchar:
token = self._nexus_tokenizer.require_next_token()
if token == "{" or token == "(":
if token == "{":
# multistate_type = dataobject.StateIdentity.AMBIGUOUS_STATE
multistate_type = state_alphabet.AMBIGUOUS_STATE
closing_token = "}"
else:
# multistate_type = dataobject.StateIdentity.POLYMORPHIC_STATE
multistate_type = state_alphabet.POLYMORPHIC_STATE
closing_token = ")"
multistate_tokens = []
while True:
token = self._nexus_tokenizer.require_next_token()
if token == closing_token:
break
multistate_tokens.append(token)
c = "".join(multistate_tokens)
state = self._get_state_for_multistate_tokens(c, multistate_type, state_alphabet)
if len(character_data_vector) + len(states_to_add) == self._file_specified_nchar:
raise self._too_many_characters_error(c)
states_to_add.append(state)
elif token == "\r" or token == "\n":
if self._interleave:
break
elif token == ";":
raise NexusReader.BlockTerminatedException
else:
for c in token:
if c in self._match_char:
try:
state = first_sequence_defined[len(character_data_vector) + len(states_to_add)]
except TypeError:
exc = self._nexus_error("Cannot dereference MATCHCHAR '{}' on first sequence".format(c), NexusReader.NexusReaderError)
exc.__context__ = None # Python 3.0, 3.1, 3.2
exc.__cause__ = None # Python 3.3, 3.4
raise exc
except IndexError:
exc = self._nexus_error("Cannot dereference MATCHCHAR '{}': current position ({}) exceeds length of first sequence ({})".format(c,
len(character_data_vector) + len(states_to_add) + 1,
len(first_sequence_defined),
NexusReader.NexusReaderError))
exc.__context__ = None # Python 3.0, 3.1, 3.2
exc.__cause__ = None # Python 3.3, 3.4
raise exc
else:
try:
state = state_alphabet.full_symbol_state_map[c]
except KeyError:
exc = self._nexus_error("Unrecognized character state symbol for state alphabet '{}' ({}) : '{}'".format(
state_alphabet.label,
state_alphabet.__class__.__name__,
c),
NexusReader.InvalidCharacterStateSymbolError)
exc.__context__ = None # Python 3.0, 3.1, 3.2
exc.__cause__ = None # Python 3.3, 3.4
raise exc
if len(character_data_vector) + len(states_to_add) == self._file_specified_nchar:
raise self._too_many_characters_error(c)
states_to_add.append(state)
if self._interleave:
self._nexus_tokenizer.set_capture_eol(False)
character_data_vector.extend(states_to_add)
return character_data_vector
def _read_continuous_character_values(self,
character_data_vector,
datatype=float,
):
"""
Reads character sequence data substatement until the number of
character states read is equal to ``self._file_specified_nchar`` (with
multi-state characters, such as '(AG)' counting as a single
state) or, if ``self._interleave`` is |True|, until an EOL is
reached.
"""
if self._interleave:
self._nexus_tokenizer.set_capture_eol(True)
while len(character_data_vector) < self._file_specified_nchar:
token = self._nexus_tokenizer.require_next_token()
if token == "\r" or token == "\n":
if self._interleave:
break
elif token == ";":
raise NexusReader.BlockTerminatedException
else:
try:
state = float(token)
except ValueError:
exc = self._nexus_error("Invalid value for continuous character type: '{invalid_value}'".format(datatype=datatype, invalid_value=token),
NexusReader.InvalidContinuousCharacterValueError)
exc.__context__ = None # Python 3.0, 3.1, 3.2
exc.__cause__ = None # Python 3.3, 3.4
raise exc
# if c in self._match_char:
# try:
# state = first_sequence_defined[len(character_data_vector)]
# except TypeError:
# exc = self._nexus_error("Cannot dereference MATCHCHAR '{}' on first sequence".format(c), NexusReader.NexusReaderError)
# exc.__context__ = None # Python 3.0, 3.1, 3.2
# exc.__cause__ = None # Python 3.3, 3.4
# raise exc
# except IndexError:
# exc = self._nexus_error("Cannot dereference MATCHCHAR '{}': current position ({}) exceeds length of first sequence ({})".format(c,
# len(character_data_vector)+1,
# len(first_sequence_defined),
# NexusReader.NexusReaderError))
# exc.__context__ = None # Python 3.0, 3.1, 3.2
# exc.__cause__ = None # Python 3.3, 3.4
# raise exc
# else:
# try:
# state = state_alphabet.full_symbol_state_map[c]
# except KeyError:
# exc = self._nexus_error("Unrecognized character state symbol for state alphabet '{}' ({}) : '{}'".format(
# state_alphabet.label,
# state_alphabet.__class__.__name__,
# c),
# NexusReader.InvalidCharacterStateSymbolError)
# exc.__context__ = None # Python 3.0, 3.1, 3.2
# exc.__cause__ = None # Python 3.3, 3.4
# raise exc
if len(character_data_vector) == self._file_specified_nchar:
raise self._too_many_characters_error(token)
character_data_vector.append(state)
if self._interleave:
self._nexus_tokenizer.set_capture_eol(False)
return character_data_vector | PypiClean |
# foilmesh/meshio/_cli/_compress.py
import os
import pathlib
from .. import ansys, cgns, gmsh, h5m, mdpa, ply, stl, vtk, vtu, xdmf
from .._common import error
from .._helpers import _filetypes_from_path, read, reader_map
def add_args(parser):
    """Register the command-line options of the ``compress`` subcommand on *parser*."""
    parser.add_argument("infile", type=str, help="mesh file to compress")
    input_format_choices = sorted(reader_map.keys())
    parser.add_argument(
        "--input-format",
        "-i",
        type=str,
        choices=input_format_choices,
        help="input file format",
        default=None,
    )
    parser.add_argument(
        "--max",
        "-max",
        action="store_true",
        help="maximum compression",
        default=False,
    )
def compress(args):
    """Compress the mesh file ``args.infile`` in place.

    The format is taken from ``--input-format`` when given, otherwise guessed
    from the file extension. Prints the file size before and after, and exits
    with status 1 for formats that have no compressed variant.

    Fix: removed a stray ``| PypiClean`` artifact that had been fused onto the
    final ``print`` call (it made the last statement invalid), and hoisted the
    duplicated gzip-level expression into one variable.
    """
    if args.input_format:
        fmts = [args.input_format]
    else:
        fmts = _filetypes_from_path(pathlib.Path(args.infile))
    # pick the first
    fmt = fmts[0]

    size = os.stat(args.infile).st_size
    print(f"File size before: {size / 1024 ** 2:.2f} MB")

    mesh = read(args.infile, file_format=args.input_format)

    # # Some converters (like VTK) require `points` to be contiguous.
    # mesh.points = np.ascontiguousarray(mesh.points)

    # gzip level shared by the HDF5-based writers (cgns, h5m, xdmf).
    gzip_opts = 9 if args.max else 4

    # write it out
    if fmt == "ansys":
        ansys.write(args.infile, mesh, binary=True)
    elif fmt == "cgns":
        cgns.write(args.infile, mesh, compression="gzip", compression_opts=gzip_opts)
    elif fmt == "gmsh":
        gmsh.write(args.infile, mesh, binary=True)
    elif fmt == "h5m":
        h5m.write(args.infile, mesh, compression="gzip", compression_opts=gzip_opts)
    elif fmt == "mdpa":
        mdpa.write(args.infile, mesh, binary=True)
    elif fmt == "ply":
        ply.write(args.infile, mesh, binary=True)
    elif fmt == "stl":
        stl.write(args.infile, mesh, binary=True)
    elif fmt == "vtk":
        vtk.write(args.infile, mesh, binary=True)
    elif fmt == "vtu":
        vtu.write(
            args.infile, mesh, binary=True, compression="lzma" if args.max else "zlib"
        )
    elif fmt == "xdmf":
        xdmf.write(
            args.infile,
            mesh,
            data_format="HDF",
            compression="gzip",
            compression_opts=gzip_opts,
        )
    else:
        error(f"Don't know how to compress {args.infile}.")
        exit(1)

    size = os.stat(args.infile).st_size
    print(f"File size after: {size / 1024 ** 2:.2f} MB")
/2vyper-0.3.0.tar.gz/2vyper-0.3.0/src/twovyper/translation/expression.py | from functools import reduce
from itertools import chain
from typing import List, Optional, Tuple, Callable
from twovyper.ast import ast_nodes as ast, names, types
from twovyper.ast.arithmetic import Decimal
from twovyper.ast.nodes import VyperFunction, VyperInterface, VyperVar, VyperEvent, VyperProgram
from twovyper.ast.types import MapType, ArrayType, StructType, AddressType, ContractType, InterfaceType
from twovyper.exceptions import UnsupportedException
from twovyper.translation import mangled
from twovyper.translation import helpers
from twovyper.translation.context import Context
from twovyper.translation.abstract import NodeTranslator
from twovyper.translation.allocation import AllocationTranslator
from twovyper.translation.arithmetic import ArithmeticTranslator
from twovyper.translation.balance import BalanceTranslator
from twovyper.translation.model import ModelTranslator
from twovyper.translation.resource import ResourceTranslator
from twovyper.translation.state import StateTranslator
from twovyper.translation.type import TypeTranslator
from twovyper.translation.variable import TranslatedVar
from twovyper.translation.wrapped_viper_ast import WrappedViperAST
from twovyper.utils import switch, first_index
from twovyper.verification import rules
from twovyper.verification.error import Via
from twovyper.verification.model import ModelTransformation
from twovyper.viper.ast import ViperAST
from twovyper.viper.typedefs import Expr, Stmt
# noinspection PyUnusedLocal
class ExpressionTranslator(NodeTranslator):
def __init__(self, viper_ast: ViperAST):
super().__init__(viper_ast)
self.allocation_translator = AllocationTranslator(viper_ast)
self.arithmetic_translator = ArithmeticTranslator(viper_ast, self.no_reverts)
self.balance_translator = BalanceTranslator(viper_ast)
self.model_translator = ModelTranslator(viper_ast)
self.resource_translator = ResourceTranslator(viper_ast)
self.state_translator = StateTranslator(viper_ast)
self.type_translator = TypeTranslator(viper_ast)
self._bool_ops = {
ast.BoolOperator.AND: self.viper_ast.And,
ast.BoolOperator.OR: self.viper_ast.Or,
ast.BoolOperator.IMPLIES: self.viper_ast.Implies
}
self._comparison_ops = {
ast.ComparisonOperator.LT: self.viper_ast.LtCmp,
ast.ComparisonOperator.LTE: self.viper_ast.LeCmp,
ast.ComparisonOperator.GTE: self.viper_ast.GeCmp,
ast.ComparisonOperator.GT: self.viper_ast.GtCmp
}
    def translate_top_level_expression(self, node: ast.Expr, res: List[Stmt], ctx: Context):
        """
        A top level expression is an expression directly used in a statement.

        Generally, we do not need to $wrap inside of a top level expression. Therefore, we only keep the information if
        some expressions got unwrapped inside this expression and if this expression could get wrapped. If both is true,
        only then we wrap this expression again.
        Doing this, prevents the $wrap($unwrap($wrap($unwrap(...))) chain during translation.

        If we are inside an interpreted scope, we do not wrap the result again.
        """
        if isinstance(self.viper_ast, WrappedViperAST) and not ctx.inside_interpreted:
            # Reset the flag before translating; the wrapped AST records in it
            # whether any expression got unwrapped during translation.
            self.viper_ast.unwrapped_some_expressions = False
            result = self.translate(node, res, ctx)
            if self.viper_ast.unwrapped_some_expressions:
                # Only numeric results that are currently unwrapped can (and
                # need to) be re-wrapped.
                if types.is_numeric(node.type) and self.arithmetic_translator.is_unwrapped(result):
                    result = helpers.w_wrap(self.viper_ast, result)
            return result
        else:
            return self.translate(node, res, ctx)
    @property
    def no_reverts(self) -> bool:
        # Revert checks are always generated here; the specification translator
        # overrides this behavior.
        return False
    @property
    def spec_translator(self):
        """Specification translator, created on demand (local import avoids an import cycle)."""
        from twovyper.translation.specification import SpecificationTranslator
        return SpecificationTranslator(self.viper_ast)
    @property
    def function_translator(self):
        """Function translator, created on demand (local import avoids an import cycle)."""
        from twovyper.translation.function import FunctionTranslator
        return FunctionTranslator(self.viper_ast)
def translate_Num(self, node: ast.Num, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
if isinstance(node.n, int):
if node.type == types.VYPER_BYTES32:
bts = node.n.to_bytes(32, byteorder='big')
elems = [self.viper_ast.IntLit(b, pos) for b in bts]
return self.viper_ast.ExplicitSeq(elems, pos)
else:
return self.viper_ast.IntLit(node.n, pos)
elif isinstance(node.n, Decimal):
return self.viper_ast.IntLit(node.n.scaled_value, pos)
else:
assert False
def translate_Bool(self, node: ast.Bool, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
return self.viper_ast.TrueLit(pos) if node.value else self.viper_ast.FalseLit(pos)
    def translate_Name(self, node: ast.Name, res: List[Stmt], ctx: Context) -> Expr:
        """Translate a name reference to the Viper expression it denotes."""
        pos = self.to_position(node, ctx)
        if node.id == names.SELF and (node.type == types.VYPER_ADDRESS
                                      or isinstance(node.type, (ContractType, InterfaceType))):
            # 'self' used as an address / contract value denotes the contract's
            # own address rather than the self-state variable.
            return ctx.self_address or helpers.self_address(self.viper_ast, pos)
        elif ctx.inside_inline_analysis and node.id not in ctx.all_vars:
            # Generate new local variable
            variable_name = node.id
            mangled_name = ctx.new_local_var_name(variable_name)
            var = TranslatedVar(variable_name, mangled_name, node.type, self.viper_ast, pos)
            ctx.locals[variable_name] = var
            ctx.new_local_vars.append(var.var_decl(ctx))
        # Intentional fall-through: the (possibly just created) variable is
        # looked up in the combined variable map.
        return ctx.all_vars[node.id].local_var(ctx, pos)
def translate_ArithmeticOp(self, node: ast.ArithmeticOp, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
if node.op in self.arithmetic_translator.non_linear_ops:
# Since we need the information if an expression was wrapped, we can treat this expressions as top-level.
left = self.translate_top_level_expression(node.left, res, ctx)
right = self.translate_top_level_expression(node.right, res, ctx)
else:
left = self.translate(node.left, res, ctx)
right = self.translate(node.right, res, ctx)
return self.arithmetic_translator.arithmetic_op(left, node.op, right, node.type, res, ctx, pos)
def translate_BoolOp(self, node: ast.BoolOp, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
left = self.translate(node.left, res, ctx)
op = self._bool_ops[node.op]
right = self.translate(node.right, res, ctx)
return op(left, right, pos)
def translate_Not(self, node: ast.Not, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
operand = self.translate(node.operand, res, ctx)
return self.viper_ast.Not(operand, pos)
def translate_UnaryArithmeticOp(self, node: ast.UnaryArithmeticOp, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
operand = self.translate(node.operand, res, ctx)
return self.arithmetic_translator.unary_arithmetic_op(node.op, operand, node.type, res, ctx, pos)
def translate_IfExpr(self, node: ast.IfExpr, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
test = self.translate(node.test, res, ctx)
body = self.translate(node.body, res, ctx)
orelse = self.translate(node.orelse, res, ctx)
return self.viper_ast.CondExp(test, body, orelse, pos)
def translate_Comparison(self, node: ast.Comparison, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
lhs = self.translate(node.left, res, ctx)
op = self._comparison_ops[node.op]
rhs = self.translate(node.right, res, ctx)
return op(lhs, rhs, pos)
def translate_Containment(self, node: ast.Containment, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
value = self.translate(node.value, res, ctx)
expr_list = self.translate(node.list, res, ctx)
if node.op == ast.ContainmentOperator.IN:
return helpers.array_contains(self.viper_ast, value, expr_list, pos)
elif node.op == ast.ContainmentOperator.NOT_IN:
return helpers.array_not_contains(self.viper_ast, value, expr_list, pos)
else:
assert False
def translate_Equality(self, node: ast.Equality, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
lhs = self.translate(node.left, res, ctx)
rhs = self.translate(node.right, res, ctx)
if node.op == ast.EqualityOperator.EQ:
return self.type_translator.eq(lhs, rhs, node.left.type, ctx, pos)
elif node.op == ast.EqualityOperator.NEQ:
return self.type_translator.neq(lhs, rhs, node.left.type, ctx, pos)
else:
assert False
    def translate_Attribute(self, node: ast.Attribute, res: List[Stmt], ctx: Context) -> Expr:
        """Translate attribute access on a struct value or on an address."""
        pos = self.to_position(node, ctx)

        # We don't support precise gas calculations, so we just return an unknown
        # non-negative value
        if node.attr == names.MSG_GAS and node.value.type == types.MSG_TYPE:
            gas_name = ctx.new_local_var_name('gas')
            gas_type = self.type_translator.translate(types.VYPER_UINT256, ctx)
            gas = self.viper_ast.LocalVarDecl(gas_name, gas_type, pos)
            ctx.new_local_vars.append(gas)

            # Constrain the fresh variable to be >= 0 via an inhale.
            zero = self.viper_ast.IntLit(0, pos)
            geq = self.viper_ast.GeCmp(gas.localVar(), zero, pos)
            res.append(self.viper_ast.Inhale(geq, pos))

            return gas.localVar()

        expr = self.translate(node.value, res, ctx)

        if isinstance(node.value.type, StructType):
            # The value is a struct
            struct_type = node.value.type
            struct = expr
        else:
            # The value is an address: look up its struct in the global
            # contracts map of the current state.
            struct_type = AddressType()
            contracts = ctx.current_state[mangled.CONTRACTS].local_var(ctx)
            key_type = self.type_translator.translate(types.VYPER_ADDRESS, ctx)
            value_type = helpers.struct_type(self.viper_ast)
            struct = helpers.map_get(self.viper_ast, contracts, expr, key_type, value_type)

        viper_type = self.type_translator.translate(node.type, ctx)
        get = helpers.struct_get(self.viper_ast, struct, node.attr, viper_type, struct_type, pos)
        return get
def translate_Subscript(self, node: ast.Subscript, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
value = self.translate(node.value, res, ctx)
index = self.translate(node.index, res, ctx)
node_type = node.value.type
if isinstance(node_type, MapType):
key_type = self.type_translator.translate(node_type.key_type, ctx)
value_type = self.type_translator.translate(node_type.value_type, ctx)
call = helpers.map_get(self.viper_ast, value, index, key_type, value_type, pos)
elif isinstance(node_type, ArrayType):
if not self.no_reverts:
self.type_translator.array_bounds_check(value, index, res, ctx)
element_type = self.type_translator.translate(node_type.element_type, ctx)
call = helpers.array_get(self.viper_ast, value, index, element_type, pos)
else:
assert False
return call
def translate_List(self, node: ast.List, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
if not node.elements:
viper_type = self.type_translator.translate(node.type.element_type, ctx)
return self.viper_ast.EmptySeq(viper_type, pos)
else:
elems = [self.translate(e, res, ctx) for e in node.elements]
return self.viper_ast.ExplicitSeq(elems, pos)
def translate_Str(self, node: ast.Str, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
if not node.s:
viper_type = self.type_translator.translate(node.type.element_type, ctx)
return self.viper_ast.EmptySeq(viper_type, pos)
else:
elems = [self.viper_ast.IntLit(e, pos) for e in bytes(node.s, 'utf-8')]
return self.viper_ast.ExplicitSeq(elems, pos)
def translate_Bytes(self, node: ast.Bytes, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
if not node.s:
viper_type = self.type_translator.translate(node.type.element_type, ctx)
return self.viper_ast.EmptySeq(viper_type, pos)
else:
elems = [self.viper_ast.IntLit(e, pos) for e in node.s]
return self.viper_ast.ExplicitSeq(elems, pos)
def translate_Tuple(self, node: ast.Tuple, res: List[Stmt], ctx: Context) -> Expr:
pos = self.to_position(node, ctx)
new_ret = helpers.havoc_var(self.viper_ast, helpers.struct_type(self.viper_ast), ctx)
for idx, element in enumerate(node.elements):
viper_type = self.type_translator.translate(element.type, ctx)
value = self.translate(element, res, ctx)
new_ret = helpers.struct_set_idx(self.viper_ast, new_ret, value, idx, viper_type, pos)
return new_ret
    def translate_FunctionCall(self, node: ast.FunctionCall, res: List[Stmt], ctx: Context) -> Expr:
        """
        Translate a call to a Vyper built-in function, a struct / contract /
        interface initializer, or a ghost statement.

        Statements modelling checks that may revert (division by zero,
        over/underflow, bounds, failing external calls, ...) are appended to
        `res`. Returns the Viper expression for the call result, or None for
        statement-like built-ins (selfdestruct, assert_modifiable, raw_log).
        """
        pos = self.to_position(node, ctx)
        name = node.name

        # min/max are modelled as a conditional over a comparison.
        is_min = (name == names.MIN)
        is_max = (name == names.MAX)
        if is_min or is_max:
            lhs = self.translate(node.args[0], res, ctx)
            rhs = self.translate(node.args[1], res, ctx)
            op = self.viper_ast.GtCmp if is_max else self.viper_ast.LtCmp
            comp = op(lhs, rhs, pos)
            return self.viper_ast.CondExp(comp, lhs, rhs, pos)
        elif name == names.ADDMOD or name == names.MULMOD:
            op1 = self.translate(node.args[0], res, ctx)
            op2 = self.translate(node.args[1], res, ctx)
            mod = self.translate(node.args[2], res, ctx)

            # Revert if the modulus is zero.
            cond = self.viper_ast.EqCmp(mod, self.viper_ast.IntLit(0, pos), pos)
            self.fail_if(cond, [], res, ctx, pos)

            operation = self.viper_ast.Add if name == names.ADDMOD else self.viper_ast.Mul
            op_res = operation(op1, op2, pos)
            return helpers.mod(self.viper_ast, op_res, mod, pos)
        elif name == names.SQRT:
            arg = self.translate(node.args[0], res, ctx)

            # Revert on negative arguments.
            zero = self.viper_ast.IntLit(0, pos)
            lt = self.viper_ast.LtCmp(arg, zero, pos)
            self.fail_if(lt, [], res, ctx, pos)

            sqrt = helpers.sqrt(self.viper_ast, arg, pos)
            return sqrt
        elif name == names.FLOOR or name == names.CEIL:
            # Let s be the scaling factor, then
            # floor(d) == d < 0 ? (d - (s - 1)) / s : d / s
            # ceil(d) == d < 0 ? d / s : (d + s - 1) / s
            arg = self.translate(node.args[0], res, ctx)
            scaling_factor = node.args[0].type.scaling_factor

            if name == names.FLOOR:
                expr = helpers.floor(self.viper_ast, arg, scaling_factor, pos)
            elif name == names.CEIL:
                expr = helpers.ceil(self.viper_ast, arg, scaling_factor, pos)
            else:
                assert False

            return expr
        elif name == names.SHIFT:
            arg = self.translate(node.args[0], res, ctx)
            shift = self.translate(node.args[1], res, ctx)
            return helpers.shift(self.viper_ast, arg, shift, pos)
        elif name in [names.BITWISE_AND, names.BITWISE_OR, names.BITWISE_XOR]:
            a = self.translate(node.args[0], res, ctx)
            b = self.translate(node.args[1], res, ctx)

            funcs = {
                names.BITWISE_AND: helpers.bitwise_and,
                names.BITWISE_OR: helpers.bitwise_or,
                names.BITWISE_XOR: helpers.bitwise_xor
            }

            return funcs[name](self.viper_ast, a, b, pos)
        elif name == names.BITWISE_NOT:
            arg = self.translate(node.args[0], res, ctx)
            return helpers.bitwise_not(self.viper_ast, arg, pos)
        elif name == names.AS_WEI_VALUE:
            arg = self.translate(node.args[0], res, ctx)
            second_arg = node.args[1]
            assert isinstance(second_arg, ast.Str)
            unit = second_arg.s
            unit_pos = self.to_position(second_arg, ctx)
            # Look the denomination (e.g. "wei", "ether") up in the unit table.
            multiplier = next(v for k, v in names.ETHER_UNITS.items() if unit in k)
            multiplier_lit = self.viper_ast.IntLit(multiplier, unit_pos)
            num = self.viper_ast.Mul(arg, multiplier_lit, pos)

            if types.is_bounded(node.type):
                self.arithmetic_translator.check_under_overflow(num, node.type, res, ctx, pos)

            return num
        elif name == names.AS_UNITLESS_NUMBER:
            # The underlying representation does not change.
            return self.translate(node.args[0], res, ctx)
        elif name == names.LEN:
            arr = self.translate(node.args[0], res, ctx)
            return helpers.array_length(self.viper_ast, arr, pos)
        elif name == names.RANGE:
            if len(node.args) == 1:
                # Single-argument form: range(end) starts at 0.
                start = self.viper_ast.IntLit(0, pos)
                end = self.translate(node.args[0], res, ctx)
            else:
                start = self.translate(node.args[0], res, ctx)
                end = self.translate(node.args[1], res, ctx)

            return helpers.range(self.viper_ast, start, end, pos)
        elif name == names.CONCAT:
            concats = [self.translate(arg, res, ctx) for arg in node.args]

            # Right-fold all arguments into a chain of sequence appends.
            def concat(arguments):
                argument, *tail = arguments
                if not tail:
                    return argument
                else:
                    return self.viper_ast.SeqAppend(argument, concat(tail), pos)

            return concat(concats)
        elif name == names.EXTRACT32:
            b = self.translate(node.args[0], res, ctx)
            b_len = helpers.array_length(self.viper_ast, b, pos)
            zero = self.viper_ast.IntLit(0, pos)
            start = self.translate(node.args[1], res, ctx)
            lit_32 = self.viper_ast.IntLit(32, pos)
            end = self.viper_ast.Add(lit_32, start, pos)

            # General revert conditions
            start_is_negative = self.viper_ast.LtCmp(start, zero, pos)
            seq_too_small = self.viper_ast.LtCmp(b_len, end, pos)
            cond = self.viper_ast.Or(start_is_negative, seq_too_small)
            self.fail_if(cond, [], res, ctx, pos)

            # Convert byte list to desired type
            b_sliced = self.viper_ast.SeqTake(b, end, pos)
            b_sliced = self.viper_ast.SeqDrop(b_sliced, start, pos)
            b_bytes32 = helpers.pad32(self.viper_ast, b_sliced, pos)

            with switch(node.type) as case:
                if case(types.VYPER_BYTES32):
                    i = b_bytes32
                elif case(types.VYPER_INT128):
                    i = helpers.convert_bytes32_to_signed_int(self.viper_ast, b_bytes32, pos)
                    self.arithmetic_translator.check_under_overflow(i, types.VYPER_INT128, res, ctx, pos)
                elif case(types.VYPER_ADDRESS):
                    i = helpers.convert_bytes32_to_unsigned_int(self.viper_ast, b_bytes32, pos)
                    self.arithmetic_translator.check_under_overflow(i, types.VYPER_ADDRESS, res, ctx, pos)
                else:
                    assert False

            return i
        elif name == names.EMPTY:
            return self.type_translator.default_value(node, node.type, res, ctx)
        elif name == names.CONVERT:
            from_type = node.args[0].type
            to_type = node.type

            arg = self.translate(node.args[0], res, ctx)

            if isinstance(from_type, ArrayType) and from_type.element_type == types.VYPER_BYTE:
                if from_type.size > 32:
                    raise UnsupportedException(node, 'Unsupported type converison.')

                # If we convert a byte array to some type, we simply pad it to a bytes32 and
                # proceed as if we had been given a bytes32
                arg = helpers.pad32(self.viper_ast, arg, pos)
                from_type = types.VYPER_BYTES32

            zero = self.viper_ast.IntLit(0, pos)
            one = self.viper_ast.IntLit(1, pos)

            # Byte-sequence representations of 0 and 1 for bytes32 conversions.
            zero_list = [0] * 32
            one_list = [0] * 31 + [1]
            zero_array = self.viper_ast.ExplicitSeq([self.viper_ast.IntLit(i, pos) for i in zero_list], pos)
            one_array = self.viper_ast.ExplicitSeq([self.viper_ast.IntLit(i, pos) for i in one_list], pos)

            with switch(from_type, to_type) as case:
                from twovyper.utils import _
                # If both types are equal (e.g. if we convert a literal) we simply
                # return the argument
                if case(_, _, where=from_type == to_type):
                    return arg
                # --------------------- bool -> ? ---------------------
                # If we convert from a bool we translate True as 1 and False as 0
                elif case(types.VYPER_BOOL, types.VYPER_DECIMAL):
                    d_one = 1 * types.VYPER_DECIMAL.scaling_factor
                    d_one_lit = self.viper_ast.IntLit(d_one, pos)
                    return helpers.w_wrap(self.viper_ast, self.viper_ast.CondExp(arg, d_one_lit, zero, pos))
                elif case(types.VYPER_BOOL, types.VYPER_BYTES32):
                    return self.viper_ast.CondExp(arg, one_array, zero_array, pos)
                elif case(types.VYPER_BOOL, _, where=types.is_numeric(to_type)):
                    return helpers.w_wrap(self.viper_ast, self.viper_ast.CondExp(arg, one, zero, pos))
                elif case(types.VYPER_BOOL, _):
                    return self.viper_ast.CondExp(arg, one, zero, pos)
                # --------------------- ? -> bool ---------------------
                # If we convert to a bool we check for zero
                elif case(types.VYPER_BYTES32, types.VYPER_BOOL):
                    return self.viper_ast.NeCmp(arg, zero_array, pos)
                elif case(_, types.VYPER_BOOL):
                    return self.viper_ast.NeCmp(arg, zero, pos)
                # --------------------- decimal -> ? ---------------------
                elif case(types.VYPER_DECIMAL, types.VYPER_INT128):
                    s = self.viper_ast.IntLit(types.VYPER_DECIMAL.scaling_factor, pos)
                    return helpers.div(self.viper_ast, arg, s, pos)
                elif case(types.VYPER_DECIMAL, types.VYPER_UINT256):
                    s = self.viper_ast.IntLit(types.VYPER_DECIMAL.scaling_factor, pos)
                    div = helpers.div(self.viper_ast, arg, s, pos)
                    self.arithmetic_translator.check_underflow(div, to_type, res, ctx, pos)
                    return div
                elif case(types.VYPER_DECIMAL, types.VYPER_BYTES32):
                    return helpers.convert_signed_int_to_bytes32(self.viper_ast, arg, pos)
                # --------------------- int128 -> ? ---------------------
                elif case(types.VYPER_INT128, types.VYPER_DECIMAL):
                    s = self.viper_ast.IntLit(types.VYPER_DECIMAL.scaling_factor, pos)
                    return self.viper_ast.Mul(arg, s, pos)
                # When converting a signed number to an unsigned number we revert if
                # the argument is negative
                elif case(types.VYPER_INT128, types.VYPER_UINT256):
                    self.arithmetic_translator.check_underflow(arg, to_type, res, ctx, pos)
                    return arg
                elif case(types.VYPER_INT128, types.VYPER_BYTES32):
                    return helpers.convert_signed_int_to_bytes32(self.viper_ast, arg, pos)
                # --------------------- uint256 -> ? ---------------------
                elif case(types.VYPER_UINT256, types.VYPER_DECIMAL):
                    s = self.viper_ast.IntLit(types.VYPER_DECIMAL.scaling_factor, pos)
                    mul = self.viper_ast.Mul(arg, s, pos)
                    self.arithmetic_translator.check_overflow(mul, to_type, res, ctx, pos)
                    return mul
                # If we convert an unsigned to a signed value we simply return
                # the argument, given that it fits
                elif case(types.VYPER_UINT256, types.VYPER_INT128):
                    self.arithmetic_translator.check_overflow(arg, to_type, res, ctx, pos)
                    return arg
                elif case(types.VYPER_UINT256, types.VYPER_BYTES32):
                    return helpers.convert_unsigned_int_to_bytes32(self.viper_ast, arg, pos)
                # --------------------- bytes32 -> ? ---------------------
                elif case(types.VYPER_BYTES32, types.VYPER_DECIMAL) or case(types.VYPER_BYTES32, types.VYPER_INT128):
                    i = helpers.convert_bytes32_to_signed_int(self.viper_ast, arg, pos)
                    self.arithmetic_translator.check_under_overflow(i, to_type, res, ctx, pos)
                    return i
                elif case(types.VYPER_BYTES32, types.VYPER_UINT256):
                    # uint256 and bytes32 have the same size, so no overflow check is necessary
                    return helpers.convert_bytes32_to_unsigned_int(self.viper_ast, arg, pos)
                else:
                    raise UnsupportedException(node, 'Unsupported type converison.')
        elif name == names.KECCAK256:
            arg = self.translate(node.args[0], res, ctx)
            return helpers.keccak256(self.viper_ast, arg, pos)
        elif name == names.SHA256:
            arg = self.translate(node.args[0], res, ctx)
            return helpers.sha256(self.viper_ast, arg, pos)
        elif name == names.BLOCKHASH:
            arg = self.translate(node.args[0], res, ctx)

            block = ctx.block_var.local_var(ctx)
            number_type = self.type_translator.translate(types.BLOCK_TYPE.member_types[names.BLOCK_NUMBER], ctx)
            block_number = helpers.struct_get(self.viper_ast, block, names.BLOCK_NUMBER, number_type,
                                              types.BLOCK_TYPE, pos)

            # Only the last 256 blocks (before the current block) are available in blockhash, else we revert
            lt = self.viper_ast.LtCmp(arg, block_number, pos)
            last_256 = self.viper_ast.Sub(block_number, self.viper_ast.IntLit(256, pos), pos)
            ge = self.viper_ast.GeCmp(arg, last_256, pos)
            cond = self.viper_ast.Not(self.viper_ast.And(lt, ge, pos), pos)
            self.fail_if(cond, [], res, ctx, pos)

            return helpers.blockhash(self.viper_ast, arg, ctx, pos)
        elif name == names.METHOD_ID:
            arg = self.translate(node.args[0], res, ctx)
            return helpers.method_id(self.viper_ast, arg, node.type.size, pos)
        elif name == names.ECRECOVER:
            args = [self.translate(arg, res, ctx) for arg in node.args]
            return helpers.ecrecover(self.viper_ast, args, pos)
        elif name == names.ECADD or name == names.ECMUL:
            args = [self.translate(arg, res, ctx) for arg in node.args]
            # The curve operation may fail non-deterministically: model this
            # with a fresh, unconstrained boolean failure flag.
            fail_var_name = ctx.new_local_var_name('$fail')
            fail_var_decl = self.viper_ast.LocalVarDecl(fail_var_name, self.viper_ast.Bool, pos)
            ctx.new_local_vars.append(fail_var_decl)
            fail_var = fail_var_decl.localVar()
            self.fail_if(fail_var, [], res, ctx, pos)

            if name == names.ECADD:
                return helpers.ecadd(self.viper_ast, args, pos)
            else:
                return helpers.ecmul(self.viper_ast, args, pos)
        elif name == names.SELFDESTRUCT:
            to = self.translate(node.args[0], res, ctx)

            self_var = ctx.self_var.local_var(ctx)
            self_type = ctx.self_type

            # The whole remaining balance is sent to the beneficiary.
            balance = self.balance_translator.get_balance(self_var, ctx, pos)

            if ctx.program.config.has_option(names.CONFIG_ALLOCATION):
                self.allocation_translator.deallocate_wei(node, to, balance, res, ctx, pos)

            # Mark the contract as self-destructed in the self struct.
            val = self.viper_ast.TrueLit(pos)
            member = mangled.SELFDESTRUCT_FIELD
            viper_type = self.type_translator.translate(self_type.member_types[member], ctx)
            sset = helpers.struct_set(self.viper_ast, self_var, val, member, viper_type, self_type, pos)
            res.append(self.viper_ast.LocalVarAssign(self_var, sset, pos))

            self.balance_translator.increase_sent(to, balance, res, ctx, pos)

            # Zero the contract's balance and jump straight to the return label.
            zero = self.viper_ast.IntLit(0, pos)
            bset = self.balance_translator.set_balance(self_var, zero, ctx, pos)
            res.append(self.viper_ast.LocalVarAssign(self_var, bset, pos))

            res.append(self.viper_ast.Goto(ctx.return_label, pos))
            return None
        elif name == names.ASSERT_MODIFIABLE:
            cond = self.translate(node.args[0], res, ctx)
            not_cond = self.viper_ast.Not(cond, pos)
            self.fail_if(not_cond, [], res, ctx, pos)
            return None
        elif name == names.SEND:
            to = self.translate(node.args[0], res, ctx)
            amount = self.translate(node.args[1], res, ctx)
            _, expr = self._translate_external_call(node, to, amount, False, res, ctx)
            return expr
        elif name == names.RAW_CALL:
            # Translate the callee address
            to = self.translate(node.args[0], res, ctx)
            # Translate the data expression (bytes)
            _ = self.translate(node.args[1], res, ctx)

            amount = self.viper_ast.IntLit(0, pos)
            is_static = False
            for kw in node.keywords:
                arg = self.translate(kw.value, res, ctx)
                if kw.name == names.RAW_CALL_VALUE:
                    amount = arg
                elif kw.name == names.RAW_CALL_IS_STATIC_CALL:
                    assert isinstance(kw.value, ast.Bool)
                    is_static = kw.value.value

            _, call = self._translate_external_call(node, to, amount, is_static, res, ctx)
            return call
        elif name == names.RAW_LOG:
            _ = self.translate(node.args[0], res, ctx)
            _ = self.translate(node.args[1], res, ctx)

            # Since we don't know what raw_log logs, any event could have been emitted.
            # Therefore we create a fresh var and do
            # if var == 0:
            #    log.event1(...)
            # elif var == 1:
            #    log.event2(...)
            # ...
            # for all events to indicate that at most one event has been emitted.
            var_name = ctx.new_local_var_name('$a')
            var_decl = self.viper_ast.LocalVarDecl(var_name, self.viper_ast.Int, pos)
            ctx.new_local_vars.append(var_decl)
            var = var_decl.localVar()

            for idx, event in enumerate(ctx.program.events.values()):
                condition = self.viper_ast.EqCmp(var, self.viper_ast.IntLit(idx, pos), pos)

                # Each event argument is a fresh, unconstrained variable.
                args = []
                for arg_type in event.type.arg_types:
                    arg_name = ctx.new_local_var_name('$arg')
                    arg_type = self.type_translator.translate(arg_type, ctx)
                    arg = self.viper_ast.LocalVarDecl(arg_name, arg_type, pos)
                    ctx.new_local_vars.append(arg)
                    args.append(arg.localVar())

                log_event = []
                self.log_event(event, args, log_event, ctx, pos)
                res.append(self.viper_ast.If(condition, log_event, [], pos))

            return None
        elif name == names.CREATE_FORWARDER_TO:
            at = self.translate(node.args[0], res, ctx)
            if node.keywords:
                # A `value=` keyword sends ether along with the creation.
                amount = self.translate(node.keywords[0].value, res, ctx)

                if ctx.program.config.has_option(names.CONFIG_ALLOCATION):
                    msg_sender = helpers.msg_sender(self.viper_ast, ctx, pos)
                    self.allocation_translator.deallocate_wei(node, msg_sender, amount, res, ctx, pos)

                self.balance_translator.check_balance(amount, res, ctx, pos)
                self.balance_translator.increase_sent(at, amount, res, ctx, pos)
                self.balance_translator.decrease_balance(amount, res, ctx, pos)

            # The result is a fresh address; creation fails if it is zero.
            new_name = ctx.new_local_var_name('$new')
            viper_type = self.type_translator.translate(node.type, ctx)
            new_var_decl = self.viper_ast.LocalVarDecl(new_name, viper_type, pos)
            ctx.new_local_vars.append(new_var_decl)
            new_var = new_var_decl.localVar()

            eq_zero = self.viper_ast.EqCmp(new_var, self.viper_ast.IntLit(0, pos), pos)
            self.fail_if(eq_zero, [], res, ctx, pos)

            return new_var
        # This is a struct initializer
        elif len(node.args) == 1 and isinstance(node.args[0], ast.Dict):
            first_arg = node.args[0]
            assert isinstance(first_arg, ast.Dict)
            exprs = {}
            for key, value in zip(first_arg.keys, first_arg.values):
                value_expr = self.translate(value, res, ctx)
                idx = node.type.member_indices[key.id]
                exprs[idx] = value_expr

            init_args = [exprs[i] for i in range(len(exprs))]
            init = helpers.struct_init(self.viper_ast, init_args, node.type, pos)
            return init
        # This is a contract / interface initializer
        elif name in ctx.current_program.contracts or name in ctx.current_program.interfaces:
            return self.translate(node.args[0], res, ctx)
        elif name in names.GHOST_STATEMENTS:
            return self.spec_translator.translate_ghost_statement(node, res, ctx)
        else:
            assert False
    def translate_ReceiverCall(self, node: ast.ReceiverCall, res: List[Stmt], ctx: Context) -> Expr:
        """
        Translate a call with a receiver: a self-call (inlined), an external
        contract / interface call, an event emission (`log.X(...)`), or a
        lemma application (`lemma.X(...)`).
        """
        pos = self.to_position(node, ctx)
        name = node.name

        args = [self.translate(arg, res, ctx) for arg in node.args]
        rec_type = node.receiver.type

        if isinstance(rec_type, types.SelfType):
            # Calls on the contract itself are inlined.
            call_result = self.function_translator.inline(node, args, res, ctx)
            return call_result
        elif isinstance(rec_type, (ContractType, InterfaceType)):
            to = self.translate(node.receiver, res, ctx)

            # Amount of ether sent along, taken from a `value=` keyword if present.
            val_idx = first_index(lambda n: n.name == names.RAW_CALL_VALUE, node.keywords)
            if val_idx >= 0:
                amount = self.translate(node.keywords[val_idx].value, res, ctx)
            else:
                amount = None

            if isinstance(rec_type, ContractType):
                const = rec_type.function_modifiers[node.name] == names.CONSTANT
                pure = rec_type.function_modifiers[node.name] == names.PURE
                _, call_result = self._translate_external_call(node, to, amount, const or pure, res, ctx)
            else:
                interface = ctx.program.interfaces[rec_type.name]
                function = interface.functions[name]
                const = function.is_constant()

                # If the function is payable, but no ether is sent, revert
                # If the function is not payable, but ether is sent, revert
                zero = self.viper_ast.IntLit(0, pos)
                if function.is_payable():
                    cond = self.viper_ast.LeCmp(amount, zero, pos) if amount else self.viper_ast.TrueLit(pos)
                else:
                    cond = self.viper_ast.NeCmp(amount, zero, pos) if amount else self.viper_ast.FalseLit(pos)

                self.fail_if(cond, [], res, ctx, pos)
                # Pass the statically known callee so its specification can be used.
                known = (interface, function, args)
                succ, call_result = self._translate_external_call(node, to, amount, const, res, ctx, known)

            return call_result
        else:
            assert isinstance(node.receiver, ast.Name)
            if node.receiver.id == names.LOG:
                event = ctx.program.events[name]
                self.log_event(event, args, res, ctx, pos)
                return None
            elif node.receiver.id == names.LEMMA:
                # Lemma applications become calls to the (unwrapped) lemma function.
                lemma = ctx.program.lemmas[node.name]
                mangled_name = mangled.lemma_name(node.name)
                call_pos = self.to_position(lemma.node, ctx)
                via = Via('lemma', call_pos)
                pos = self.to_position(node, ctx, vias=[via], rules=rules.LEMMA_FAIL, values={'function': lemma})
                args = [self.translate_top_level_expression(arg, res, ctx) for arg in node.args]
                for idx, arg_var in enumerate(lemma.args.values()):
                    if types.is_numeric(arg_var.type):
                        # Numeric lemma arguments must be passed in wrapped form.
                        if self.arithmetic_translator.is_unwrapped(args[idx]):
                            args[idx] = helpers.w_wrap(self.viper_ast, args[idx], pos)
                viper_ast = self.viper_ast
                if isinstance(viper_ast, WrappedViperAST):
                    viper_ast = viper_ast.viper_ast
                return viper_ast.FuncApp(mangled_name, args, pos, type=self.viper_ast.Bool)
            else:
                assert False
    def assert_caller_private(self, modelt: ModelTransformation, res: List[Stmt], ctx: Context, vias: List[Via] = None):
        """
        Assert, for every caller-private expression of every implemented
        interface, that its value stayed the same for all callers other than
        msg.sender: forall $caller != msg.sender :: Expr == old(Expr).
        """
        for interface_type in ctx.program.implements:
            interface = ctx.program.interfaces[interface_type.name]
            with ctx.program_scope(interface):
                with ctx.state_scope(ctx.current_state, ctx.current_old_state):
                    for caller_private in interface.caller_private:
                        pos = self.to_position(caller_private, ctx, rules.CALLER_PRIVATE_FAIL, vias or [], modelt)
                        # Quantified variable
                        q_name = mangled.quantifier_var_name(mangled.CALLER)
                        q_var = TranslatedVar(mangled.CALLER, q_name, types.VYPER_ADDRESS, self.viper_ast, pos)
                        ctx.locals[mangled.CALLER] = q_var
                        # $caller != msg.sender ==> Expr == old(Expr)
                        msg_sender = helpers.msg_sender(self.viper_ast, ctx, pos)
                        ignore_cond = self.viper_ast.NeCmp(msg_sender, q_var.local_var(ctx, pos), pos)
                        _, curr_caller_private = self.spec_translator.translate_caller_private(caller_private, ctx)
                        # Evaluate the same expression in the old state for comparison.
                        with ctx.state_scope(ctx.current_old_state, ctx.current_old_state):
                            cond, old_caller_private = self.spec_translator\
                                .translate_caller_private(caller_private, ctx)
                        ignore_cond = self.viper_ast.And(ignore_cond, cond, pos)
                        caller_private_cond = self.viper_ast.EqCmp(curr_caller_private, old_caller_private, pos)
                        expr = self.viper_ast.Implies(ignore_cond, caller_private_cond, pos)
                        # Address type assumption
                        type_assumptions = self.type_translator.type_assumptions(q_var.local_var(ctx), q_var.type, ctx)
                        type_assumptions = reduce(self.viper_ast.And, type_assumptions, self.viper_ast.TrueLit())
                        expr = self.viper_ast.Implies(type_assumptions, expr, pos)
                        # Assertion
                        forall = self.viper_ast.Forall([q_var.var_decl(ctx)], [], expr, pos)
                        res.append(self.viper_ast.Assert(forall, pos))
def assume_own_resources_stayed_constant(self, res: List[Stmt], ctx: Context, pos=None):
if not ctx.program.config.has_option(names.CONFIG_ALLOCATION):
return
interface_names = [t.name for t in ctx.current_program.implements]
interfaces = [ctx.current_program.interfaces[name] for name in interface_names]
# The underlying wei resource must be translated differently. Therefore, exclude it for the moment.
own_resources = [(name, resource) for name, resource in ctx.current_program.own_resources.items()
if name != names.UNDERLYING_WEI]
for i in interfaces:
interface_resources = [(name, resource) for name, resource in i.own_resources.items()
if name != names.UNDERLYING_WEI]
own_resources.extend(interface_resources)
translated_resources1 = self.resource_translator\
.translate_resources_for_quantified_expr(own_resources, ctx, pos)
translated_resources2 = self.resource_translator\
.translate_resources_for_quantified_expr(own_resources, ctx, pos, args_idx_start=len(translated_resources1))
# Special translation for creator resources
creator_resource = helpers.creator_resource()
arg = self.viper_ast.LocalVarDecl(f'$arg$$1', helpers.struct_type(self.viper_ast), pos)
t_resource = self.resource_translator.creator_resource(arg.localVar(), ctx, pos)
translated_resources1.append((t_resource, [arg], self.viper_ast.TrueLit()))
arg = self.viper_ast.LocalVarDecl(f'$arg$$2', helpers.struct_type(self.viper_ast), pos)
t_resource = self.resource_translator.creator_resource(arg.localVar(), ctx, pos)
translated_resources2.append((t_resource, [arg], self.viper_ast.TrueLit()))
# forall({r: own Resources}, allocated[r]() == old(allocated[r]()))
current_allocated = ctx.current_state[mangled.ALLOCATED].local_var(ctx)
old_allocated = ctx.current_old_state[mangled.ALLOCATED].local_var(ctx)
for t_resource, args, type_cond in translated_resources1:
current_allocated_map = self.allocation_translator.get_allocated_map(current_allocated, t_resource, ctx)
old_allocated_map = self.allocation_translator.get_allocated_map(old_allocated, t_resource, ctx)
allocated_eq = self.viper_ast.EqCmp(current_allocated_map, old_allocated_map, pos)
trigger = self.viper_ast.Trigger([current_allocated_map, t_resource], pos)
forall_eq = self.viper_ast.Forall([*args], [trigger],
self.viper_ast.Implies(type_cond, allocated_eq, pos), pos)
res.append(self.viper_ast.Inhale(forall_eq, pos))
# trusted(self) == old(trusted(self))
current_trusted = ctx.current_state[mangled.TRUSTED].local_var(ctx)
old_trusted = ctx.current_old_state[mangled.TRUSTED].local_var(ctx)
self_addr = ctx.self_address or helpers.self_address(self.viper_ast, pos)
current_trusted_map = self.allocation_translator.get_trusted_map(current_trusted, self_addr, ctx)
old_trusted_map = self.allocation_translator.get_trusted_map(old_trusted, self_addr, ctx)
eq = self.viper_ast.EqCmp(current_trusted_map, old_trusted_map, pos)
res.append(self.viper_ast.Inhale(eq, pos))
# Quantified address variable
address = self.viper_ast.LocalVarDecl('$a', self.viper_ast.Int)
address_var = address.localVar()
address_type_conds = self.type_translator.type_assumptions(address_var, types.VYPER_ADDRESS, ctx)
address_type_cond = reduce(lambda l, r: self.viper_ast.And(l, r, pos),
address_type_conds, self.viper_ast.TrueLit())
# forall({r1: own Resources, r2: own Resources}, offered[r1 <-> r2]() == old(offered[r1 <-> r2]())
current_offered = ctx.current_state[mangled.OFFERED].local_var(ctx)
old_offered = ctx.current_old_state[mangled.OFFERED].local_var(ctx)
for index, (t_resource1, args1, type_cond1) in enumerate(translated_resources1):
resource1 = own_resources[index][1] if index < len(own_resources) else None
for t_resource2, args2, type_cond2 in translated_resources2:
current_offered_map = self.allocation_translator.get_offered_map(current_offered, t_resource1,
t_resource2, ctx)
old_offered_map = self.allocation_translator.get_offered_map(old_offered, t_resource1, t_resource2, ctx)
offered_eq = self.viper_ast.EqCmp(current_offered_map, old_offered_map, pos)
type_cond = self.viper_ast.And(type_cond1, type_cond2)
forall_eq = self.viper_ast.Forall(
[*args1, *args2], [self.viper_ast.Trigger([current_offered_map], pos),
self.viper_ast.Trigger([old_offered_map], pos)],
self.viper_ast.Implies(type_cond, offered_eq, pos), pos)
res.append(self.viper_ast.Inhale(forall_eq, pos))
if resource1 is not None and resource1.underlying_resource is not None:
if resource1.name == names.WEI:
t_underlying_resource = self.resource_translator.underlying_wei_resource(ctx)
else:
t_underlying_address = self.spec_translator.translate(resource1.underlying_address, res, ctx)
args = self.viper_ast.to_list(t_resource1.getArgs())
args.pop()
args.append(t_underlying_address)
assert isinstance(resource1.type, types.DerivedResourceType)
t_underlying_resource = helpers.struct_init(self.viper_ast, args,
resource1.type.underlying_resource)
current_offered_map = self.allocation_translator.get_offered_map(current_offered, t_resource1,
t_underlying_resource, ctx)
old_offered_map = self.allocation_translator.get_offered_map(old_offered, t_resource1,
t_underlying_resource, ctx)
offered_eq = self.viper_ast.EqCmp(current_offered_map, old_offered_map, pos)
forall_eq = self.viper_ast.Forall(
[*args1], [self.viper_ast.Trigger([current_offered_map], pos),
self.viper_ast.Trigger([old_offered_map], pos)],
self.viper_ast.Implies(type_cond1, offered_eq, pos), pos)
res.append(self.viper_ast.Inhale(forall_eq, pos))
no_offers = helpers.no_offers(self.viper_ast, old_offered, t_resource1, address_var)
curr_no_offers = helpers.no_offers(self.viper_ast, current_offered, t_resource1, address_var)
implies = self.viper_ast.Implies(no_offers, curr_no_offers, pos)
trigger = self.viper_ast.Trigger([curr_no_offers], pos)
type_cond = self.viper_ast.And(type_cond1, address_type_cond)
forall_eq = self.viper_ast.Forall([address, *args1], [trigger],
self.viper_ast.Implies(type_cond, implies))
res.append(self.viper_ast.Inhale(forall_eq, pos))
def assume_interface_resources_stayed_constant(self, interface, interface_inst, res, ctx: Context, pos=None):
if isinstance(interface, VyperProgram):
with ctx.program_scope(interface):
with ctx.self_address_scope(interface_inst):
self.assume_own_resources_stayed_constant(res, ctx, pos)
    def _implicit_resource_caller_private_expressions(self, interface, self_address, res, ctx, pos=None):
        """Inhale the implicit caller-private properties of an interface's resources.

        Appends to ``res`` (wrapped in an info block) assumptions that, from the
        point of view of ``self_address``, the resource state declared by
        ``interface`` could only have changed in permitted ways:

        - trust relations involving ``self_address`` are unchanged,
        - if ``self_address`` trusted no one before, it still trusts no one,
        - if it trusted no one, its offers on interface resources are unchanged,
        - if additionally it had no open offers on a resource, its allocation of
          that resource did not decrease, and it still has no open offers.

        Does nothing unless the allocation config option is active.

        :param interface: interface whose declared resources are constrained.
        :param self_address: Viper expression for the perspective address.
        :param res: list of Viper statements the assumptions are appended to.
        :param ctx: current translation context.
        :param pos: Viper position attached to the generated AST nodes.
        """
        if not ctx.program.config.has_option(names.CONFIG_ALLOCATION):
            return
        body = []
        # Quantified self address: reuse the quantifier variable for the interface
        # if one is already bound in the surrounding context.
        q_self_address_from_context = []
        q_self_address_name = mangled.quantifier_var_name(names.INTERFACE)
        q_self_address = ctx.quantified_vars.get(q_self_address_name)
        if q_self_address is not None:
            q_self_address_from_context = [q_self_address.var_decl(ctx)]
        # Interface Address
        interface_addr = ctx.self_address or helpers.self_address(self.viper_ast)
        # Quantified address variable
        address = self.viper_ast.LocalVarDecl('$a', self.viper_ast.Int)
        address_var = address.localVar()
        type_conds = self.type_translator.type_assumptions(address_var, types.VYPER_ADDRESS, ctx)
        type_cond = reduce(lambda l, r: self.viper_ast.And(l, r, pos), type_conds, self.viper_ast.TrueLit())
        # forall({a: address}, trusted(a, by=self, where=interface)
        #        == old(trusted(a, by=self, where=interface)))
        current_trusted = ctx.current_state[mangled.TRUSTED].local_var(ctx)
        old_trusted = ctx.current_old_state[mangled.TRUSTED].local_var(ctx)
        curr_trusted_value = self.allocation_translator.get_trusted(current_trusted, interface_addr,
                                                                    address_var, self_address, ctx)
        old_trusted_value = self.allocation_translator.get_trusted(old_trusted, interface_addr,
                                                                   address_var, self_address, ctx)
        trusted_eq = self.viper_ast.EqCmp(curr_trusted_value, old_trusted_value)
        forall_eq = self.viper_ast.Forall([address, *q_self_address_from_context],
                                          [self.viper_ast.Trigger([old_trusted_value], pos),
                                           self.viper_ast.Trigger([curr_trusted_value], pos)],
                                          self.viper_ast.Implies(type_cond, trusted_eq))
        body.append(self.viper_ast.Inhale(forall_eq, pos))
        # old trust_no_one(self, interface) ==> trust_no_one(self, interface)
        current_trust_no_one = helpers.trust_no_one(self.viper_ast, current_trusted, self_address, interface_addr)
        old_trust_no_one = helpers.trust_no_one(self.viper_ast, old_trusted, self_address, interface_addr)
        forall_implies = self.viper_ast.Forall([*q_self_address_from_context],
                                               [self.viper_ast.Trigger([current_trust_no_one], pos),
                                                self.viper_ast.Trigger([old_trust_no_one], pos)],
                                               self.viper_ast.Implies(old_trust_no_one, current_trust_no_one))
        body.append(self.viper_ast.Inhale(forall_implies, pos))
        # Declared resources of interface, translated twice so resource pairs
        # (r1, r2) can be quantified over independently.
        resources = interface.declared_resources.items()
        translated_resources1 = self.resource_translator.translate_resources_for_quantified_expr(resources, ctx)
        translated_resources2 = self.resource_translator\
            .translate_resources_for_quantified_expr(resources, ctx, args_idx_start=len(translated_resources1))
        # No trust condition
        trust_no_one = helpers.trust_no_one(self.viper_ast, old_trusted,
                                            self_address, interface_addr)
        # Quantified offer struct variable
        offer = self.viper_ast.LocalVarDecl('$o', helpers.struct_type(self.viper_ast))
        offer_var = offer.localVar()
        # Offered map type
        offered_type = helpers.offered_type()
        k_type = self.type_translator.translate(offered_type.value_type.value_type.key_type, ctx)
        v_type = self.type_translator.translate(offered_type.value_type.value_type.value_type, ctx)
        # forall({r1: Resource on interface, r2: Resource on interface, o: Offer},
        #        trust_no_one(self, interface) ==> old(offered[r1 <-> r2][o]) == 0 ==>
        #        offered[r1 <-> r2][o] == 0)
        current_offered = ctx.current_state[mangled.OFFERED].local_var(ctx)
        old_offered = ctx.current_old_state[mangled.OFFERED].local_var(ctx)
        for t_resource1, args1, type_cond1 in translated_resources1:
            for t_resource2, args2, type_cond2 in translated_resources2:
                current_offered_map = self.allocation_translator \
                    .get_offered_map(current_offered, t_resource1, t_resource2, ctx)
                old_offered_map = self.allocation_translator \
                    .get_offered_map(old_offered, t_resource1, t_resource2, ctx)
                current_offered_map_get = helpers.map_get(self.viper_ast, current_offered_map,
                                                          offer_var, k_type, v_type)
                old_offered_map_get = helpers.map_get(self.viper_ast, old_offered_map,
                                                      offer_var, k_type, v_type)
                offered_eq = self.viper_ast.EqCmp(current_offered_map_get, old_offered_map_get)
                type_cond = self.viper_ast.And(type_cond1, type_cond2)
                cond = self.viper_ast.And(trust_no_one, type_cond)
                forall_eq = self.viper_ast.Forall([offer, *args1, *args2, *q_self_address_from_context],
                                                  [self.viper_ast.Trigger([current_offered_map_get], pos),
                                                   self.viper_ast.Trigger([old_offered_map_get], pos)],
                                                  self.viper_ast.Implies(cond, offered_eq))
                body.append(self.viper_ast.Inhale(forall_eq, pos))
        # forall({r: Resource on interface}, trust_no_one(self, interface)
        #        and no_offers[r](self) ==> allocated[r](self) >= old(allocated[r](self)))
        current_allocated = ctx.current_state[mangled.ALLOCATED].local_var(ctx)
        old_allocated = ctx.current_old_state[mangled.ALLOCATED].local_var(ctx)
        for t_resource, args, type_cond in translated_resources1:
            # No offers condition
            no_offers = helpers.no_offers(self.viper_ast, old_offered, t_resource, self_address)
            curr_no_offers = helpers.no_offers(self.viper_ast, current_offered, t_resource, self_address)
            current_allocated_map = self.allocation_translator \
                .get_allocated(current_allocated, t_resource, self_address, ctx)
            old_allocated_map = self.allocation_translator \
                .get_allocated(old_allocated, t_resource, self_address, ctx)
            allocated_geq = self.viper_ast.GeCmp(current_allocated_map, old_allocated_map, pos)
            cond = self.viper_ast.And(trust_no_one, no_offers)
            allocated_geq = self.viper_ast.Implies(cond, allocated_geq)
            forall_implies = self.viper_ast.Forall([*args, *q_self_address_from_context],
                                                   [self.viper_ast.Trigger([current_allocated_map], pos),
                                                    self.viper_ast.Trigger([old_allocated_map], pos)],
                                                   self.viper_ast.Implies(type_cond, allocated_geq, pos), pos)
            body.append(self.viper_ast.Inhale(forall_implies, pos))
            # no_offers[r](self) ==> no_offers[r](self) still holds
            forall_implies = self.viper_ast.Forall([*args, *q_self_address_from_context],
                                                   [self.viper_ast.Trigger([no_offers], pos),
                                                    self.viper_ast.Trigger([curr_no_offers], pos)],
                                                   self.viper_ast.Implies(no_offers, curr_no_offers, pos), pos)
            body.append(self.viper_ast.Inhale(forall_implies, pos))
        self.seqn_with_info(body, f"Implicit caller private expr of resources in interface {interface.name}", res)
def implicit_resource_caller_private_expressions(self, interface, interface_inst, self_address,
res, ctx: Context, pos=None):
if isinstance(interface, VyperInterface):
with ctx.program_scope(interface):
with ctx.self_address_scope(interface_inst):
self._implicit_resource_caller_private_expressions(interface, self_address, res, ctx, pos)
    def assume_contract_state(self, known_interface_refs: List[Tuple[str, Expr]], res: List[Stmt], ctx: Context,
                              receiver: Optional[Expr] = None, skip_caller_private=False):
        """Assume contract state for each known interface reference.

        For every ``(interface_name, interface_ref)`` pair, append to ``res``:

        - unless ``skip_caller_private``: inhales of the interface's declared
          caller-private expressions (with the caller bound to the current self
          address) plus the implicit resource caller-private assumptions —
          suppressed for ``receiver`` itself if one is given, and
        - inhales of the interface's invariants.

        Without the trust_casts config option, all of the above is guarded by a
        runtime check that ``interface_ref`` actually implements the interface.

        :param known_interface_refs: pairs of interface name and the Viper
            expression referencing an instance of that interface.
        :param res: list of Viper statements the assumptions are appended to.
        :param ctx: current translation context.
        :param receiver: if given, caller-private assumptions are not made for
            this address (it may have been changed by the call).
        :param skip_caller_private: if true, only invariants are assumed.
        """
        for interface_name, interface_ref in known_interface_refs:
            body = []
            if not skip_caller_private:
                # Assume caller private
                interface = ctx.program.interfaces[interface_name]
                with ctx.program_scope(interface):
                    with ctx.state_scope(ctx.current_state, ctx.current_old_state):
                        # Caller variable, assigned the (unwrapped) self address.
                        mangled_name = ctx.new_local_var_name(mangled.CALLER)
                        caller_var = TranslatedVar(mangled.CALLER, mangled_name, types.VYPER_ADDRESS,
                                                   self.viper_ast)
                        ctx.locals[mangled.CALLER] = caller_var
                        ctx.new_local_vars.append(caller_var.var_decl(ctx))
                        self_address = ctx.self_address or helpers.self_address(self.viper_ast)
                        if self.arithmetic_translator.is_wrapped(self_address):
                            self_address = helpers.w_unwrap(self.viper_ast, self_address)
                        assign = self.viper_ast.LocalVarAssign(caller_var.local_var(ctx), self_address)
                        body.append(assign)
                        with ctx.self_address_scope(interface_ref):
                            for caller_private in interface.caller_private:
                                pos = self.to_position(caller_private, ctx, rules.INHALE_CALLER_PRIVATE_FAIL)
                                # Caller private assumption: Expr == old(Expr) under cond.
                                _, curr_caller_private = self.spec_translator\
                                    .translate_caller_private(caller_private, ctx)
                                with ctx.state_scope(ctx.current_old_state, ctx.current_old_state):
                                    cond, old_caller_private = self.spec_translator\
                                        .translate_caller_private(caller_private, ctx)
                                caller_private_cond = self.viper_ast.EqCmp(curr_caller_private, old_caller_private, pos)
                                caller_private_cond = self.viper_ast.Implies(cond, caller_private_cond, pos)
                                body.append(self.viper_ast.Inhale(caller_private_cond, pos))
                            self._implicit_resource_caller_private_expressions(interface, caller_var.local_var(ctx),
                                                                               body, ctx)
                # Do not assume caller-private state for the call's receiver itself.
                if receiver and body:
                    neq_cmp = self.viper_ast.NeCmp(receiver, interface_ref)
                    body = helpers.flattened_conditional(self.viper_ast, neq_cmp, body, [])
            # Assume interface invariants
            interface = ctx.program.interfaces[interface_name]
            with ctx.program_scope(interface):
                with ctx.self_address_scope(interface_ref):
                    for inv in ctx.current_program.invariants:
                        cond = self.spec_translator.translate_invariant(inv, res, ctx, True)
                        i_pos = self.to_position(inv, ctx, rules.INHALE_INVARIANT_FAIL)
                        body.append(self.viper_ast.Inhale(cond, i_pos))
            if ctx.program.config.has_option(names.CONFIG_TRUST_CASTS):
                res.extend(body)
            else:
                # Only assume anything if the reference implements the interface.
                implements = helpers.implements(self.viper_ast, interface_ref, interface_name, ctx)
                res.extend(helpers.flattened_conditional(self.viper_ast, implements, body, []))
def log_event(self, event: VyperEvent, args: List[Expr], res: List[Stmt], ctx: Context, pos=None):
assert ctx
event_name = mangled.event_name(event.name)
pred_acc = self.viper_ast.PredicateAccess(args, event_name, pos)
one = self.viper_ast.FullPerm(pos)
pred_acc_pred = self.viper_ast.PredicateAccessPredicate(pred_acc, one, pos)
log = self.viper_ast.Inhale(pred_acc_pred, pos)
self.seqn_with_info([log], f"Event: {event.name}", res)
def _translate_external_call(self,
node: ast.Expr,
to: Expr,
amount: Optional[Expr],
constant: bool,
res: List[Stmt],
ctx: Context,
known: Tuple[VyperInterface, VyperFunction, List[Expr]] = None) -> Tuple[Expr, Expr]:
# Sends are translated as follows:
# - Evaluate arguments to and amount
# - Check that balance is sufficient (self.balance >= amount) else revert
# - Increment sent by amount
# - Subtract amount from self.balance (self.balance -= amount)
# - If in init, set old_self to self if this is the first public state
# - Assert checks, own 'caller private' and inter contract invariants
# - The next step is only necessary if the function is modifying:
# - Create new old-contract state
# - Havoc contract state
# - Assert local state invariants
# - Fail based on an unknown value (i.e. the call could fail)
# - The next step is only necessary if the function is modifying:
# - Undo havocing of contract state
# - The next steps are only necessary if the function is modifying:
# - Create new old state which old in the invariants after the call refers to
# - Store state before call (To be used to restore old contract state)
# - Havoc state
# - Assume 'caller private' of interface state variables but NOT receiver
# - Assume invariants of interface state variables and receiver
# - Create new old-contract state
# - Havoc contract state
# - Assume type assumptions for self
# - Assume local state invariants (where old refers to the state before send)
# - Assume invariants of interface state variables and receiver
# - Assume transitive postcondition
# - Assume that there were no reentrant calls based on an unknown value
# - If there were no reentrant calls:
# - Restore state from old state
# - Restore old contract state
# - Create new old-contract state
# - Havoc contract state
# - Assume 'caller private' of interface state variables and receiver
# - Assert inter contract invariants (during call)
# - Create new old-contract state
# - Havoc contract state
# - Assume 'caller private' of interface state variables but NOT receiver
# - Assume invariants of interface state variables and receiver
# - Restore old contract state
# - In the case of an interface call:
# - Assume postconditions
# - The next step is only necessary if the function is modifying:
# - Assert inter contract invariants (after call)
# - Create new old state which subsequent old expressions refer to
pos = self.to_position(node, ctx)
self_var = ctx.self_var.local_var(ctx)
modifying = not constant
if known:
interface, function, args = known
else:
interface = None
function = None
args = None
if amount:
self.balance_translator.check_balance(amount, res, ctx, pos)
self.balance_translator.increase_sent(to, amount, res, ctx, pos)
if ctx.program.config.has_option(names.CONFIG_ALLOCATION):
self.allocation_translator.deallocate_wei(node, to, amount, res, ctx, pos)
self.balance_translator.decrease_balance(amount, res, ctx, pos)
general_stmts_for_performs = []
performs_as_stmts_generators = []
with ctx.inline_scope(None):
# Create pre_state for function call
def inlined_pre_state(name: str) -> str:
return ctx.inline_prefix + mangled.pre_state_var_name(name)
state_for_performs = self.state_translator.state(inlined_pre_state, ctx)
if modifying:
# Save the values of to, amount, and args, as self could be changed by reentrancy
if known:
def new_var(variable, name='v'):
name += '$'
var_name = ctx.new_local_var_name(name)
var_decl = self.viper_ast.LocalVarDecl(var_name, variable.typ(), pos)
ctx.new_local_vars.append(var_decl)
res.append(self.viper_ast.LocalVarAssign(var_decl.localVar(), variable))
return var_decl.localVar()
to = new_var(to, 'to')
if amount:
amount = new_var(amount, 'amount')
# Force evaluation at this point
args = list(map(new_var, args))
if known and function.performs:
self.state_translator.copy_state(ctx.current_state, state_for_performs, general_stmts_for_performs, ctx)
performs_as_stmts = {}
performs_decider_variables = {}
sender_is_resource_address_map = {}
with ctx.program_scope(interface):
with ctx.self_address_scope(to):
with ctx.state_scope(ctx.current_state, ctx.current_old_state):
ctx.current_state[mangled.SELF] = state_for_performs[mangled.SELF]
ctx.current_state[mangled.CONTRACTS] = state_for_performs[mangled.CONTRACTS]
with ctx.interface_call_scope():
# Define new msg variable
msg_name = ctx.inline_prefix + mangled.MSG
msg_var = TranslatedVar(names.MSG, msg_name, types.MSG_TYPE, self.viper_ast)
ctx.locals[names.MSG] = msg_var
ctx.new_local_vars.append(msg_var.var_decl(ctx))
# Assume msg.sender == self and msg.value == amount
msg = msg_var.local_var(ctx)
svytype = types.MSG_TYPE.member_types[names.MSG_SENDER]
svitype = self.type_translator.translate(svytype, ctx)
msg_sender = helpers.struct_get(self.viper_ast, msg, names.MSG_SENDER,
svitype, types.MSG_TYPE)
self_address = helpers.self_address(self.viper_ast)
general_stmts_for_performs.append(self.viper_ast.Inhale(
self.viper_ast.EqCmp(msg_sender, self_address)))
if amount:
vvytype = types.MSG_TYPE.member_types[names.MSG_VALUE]
vvitype = self.type_translator.translate(vvytype, ctx)
msg_value = helpers.struct_get(self.viper_ast, msg, names.MSG_VALUE,
vvitype, types.MSG_TYPE)
general_stmts_for_performs.append(self.viper_ast.Inhale(
self.viper_ast.EqCmp(msg_value, amount)))
# Arguments as translated variables
args_as_translated_var = [
TranslatedVar(name, val.name(), arg.type, self.viper_ast,
is_local=not self.arithmetic_translator.is_wrapped(val))
for (name, arg), val in zip(function.args.items(), args)]
ctx.locals.update((var.name, var) for var in args_as_translated_var)
# Assume performs clauses
with ctx.derived_resource_performs_scope():
for performs in function.performs:
self.spec_translator.translate_ghost_statement(
performs, general_stmts_for_performs, ctx, is_performs=True)
zero = self.viper_ast.IntLit(0)
two = self.viper_ast.IntLit(2)
for performs_idx, performs in enumerate(function.performs):
location_address = self.allocation_translator.location_address_of_performs(
performs, res, ctx)
if location_address is not None:
sender_is_resource_address = self.viper_ast.EqCmp(msg_sender, location_address)
else:
sender_is_resource_address = self.viper_ast.FalseLit()
perform_as_stmts = []
self.spec_translator.translate(performs, perform_as_stmts, ctx)
performs_var_name = ctx.new_local_var_name("performs_decider_var")
performs_var = TranslatedVar(performs_var_name, performs_var_name,
types.VYPER_UINT256, self.viper_ast)
ctx.locals[performs_var_name] = performs_var
ctx.new_local_vars.append(performs_var.var_decl(ctx))
performs_local_var = performs_var.local_var(ctx)
performs_var_ge_zero = self.viper_ast.GeCmp(performs_local_var, zero)
performs_var_le_two = self.viper_ast.LeCmp(performs_local_var, two)
cond = self.viper_ast.And(performs_var_ge_zero, performs_var_le_two)
general_stmts_for_performs.append(self.viper_ast.Inhale(cond))
performs_as_stmts[performs_idx] = perform_as_stmts
performs_decider_variables[performs_idx] = performs_local_var
sender_is_resource_address_map[performs_idx] = sender_is_resource_address
def conditional_perform_generator(p_idx: int) -> Callable[[int], List[Stmt]]:
def conditional_perform(index: int) -> List[Stmt]:
if index >= 0:
idx = self.viper_ast.IntLit(index)
decider_eq_idx = self.viper_ast.EqCmp(
performs_decider_variables[p_idx], idx)
cond_for_perform = self.viper_ast.And(
decider_eq_idx, self.viper_ast.Not(
sender_is_resource_address_map[performs_idx]))
return helpers.flattened_conditional(self.viper_ast, cond_for_perform,
performs_as_stmts[p_idx], [])
else:
return helpers.flattened_conditional(
self.viper_ast, sender_is_resource_address_map[performs_idx],
performs_as_stmts[p_idx], [])
return conditional_perform
performs_as_stmts_generators.append(conditional_perform_generator(performs_idx))
res.extend(general_stmts_for_performs)
# In init set the old self state to the current self state, if this is the
# first public state.
if ctx.function.name == names.INIT:
self.state_translator.check_first_public_state(res, ctx, True)
modelt = self.model_translator.save_variables(res, ctx, pos)
self.assert_caller_private(modelt, res, ctx, [Via('external function call', pos)])
for check in chain(ctx.function.checks, ctx.program.general_checks):
check_cond = self.spec_translator.translate_check(check, res, ctx)
via = [Via('check', check_cond.pos())]
check_pos = self.to_position(node, ctx, rules.CALL_CHECK_FAIL, via, modelt)
res.append(self.viper_ast.Assert(check_cond, check_pos))
def assert_invariants(inv_getter: Callable[[Context], List[ast.Expr]], rule: rules.Rule) -> List[Stmt]:
res_list = []
# Assert implemented interface invariants
for implemented_interface in ctx.program.implements:
vyper_interface = ctx.program.interfaces[implemented_interface.name]
with ctx.program_scope(vyper_interface):
for inv in inv_getter(ctx):
translated_inv = self.spec_translator.translate_invariant(inv, res_list, ctx, True)
call_pos = self.to_position(node, ctx, rule, [Via('invariant', translated_inv.pos())], modelt)
res_list.append(self.viper_ast.Assert(translated_inv, call_pos))
# Assert own invariants
for inv in inv_getter(ctx):
# We ignore accessible because it only has to be checked in the end of
# the function
translated_inv = self.spec_translator.translate_invariant(inv, res_list, ctx, True)
call_pos = self.to_position(node, ctx, rule, [Via('invariant', translated_inv.pos())], modelt)
res_list.append(self.viper_ast.Assert(translated_inv, call_pos))
return res_list
def assume_invariants(inv_getter: Callable[[Context], List[ast.Expr]]) -> List[Stmt]:
res_list = []
# Assume implemented interface invariants
for implemented_interface in ctx.program.implements:
vyper_interface = ctx.program.interfaces[implemented_interface.name]
with ctx.program_scope(vyper_interface):
for inv in inv_getter(ctx):
translated_inv = self.spec_translator.translate_invariant(inv, res_list, ctx, True)
inv_pos = self.to_position(inv, ctx, rules.INHALE_INVARIANT_FAIL)
res_list.append(self.viper_ast.Inhale(translated_inv, inv_pos))
# Assume own invariants
for inv in inv_getter(ctx):
translated_inv = self.spec_translator.translate_invariant(inv, res_list, ctx, True)
inv_pos = self.to_position(inv, ctx, rules.INHALE_INVARIANT_FAIL)
res_list.append(self.viper_ast.Inhale(translated_inv, inv_pos))
return res_list
assert_inter_contract_invariants = assert_invariants(lambda c: c.current_program.inter_contract_invariants,
rules.CALL_INVARIANT_FAIL)
self.seqn_with_info(assert_inter_contract_invariants, "Assert inter contract invariants before call", res)
assert_derived_resource_invariants = [self.viper_ast.Assert(expr, expr.pos())
for expr in ctx.derived_resources_invariants(node)]
self.seqn_with_info(assert_derived_resource_invariants, "Assert derived resource invariants before call", res)
self.forget_about_all_events(res, ctx, pos)
if modifying:
# Copy contract state
self.state_translator.copy_state(ctx.current_state, ctx.current_old_state, res, ctx,
unless=lambda n: n == mangled.SELF)
# Havoc contract state
self.state_translator.havoc_state(ctx.current_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.assume_own_resources_stayed_constant(res, ctx, pos)
assert_local_state_invariants = assert_invariants(lambda c: c.current_program.local_state_invariants,
rules.CALL_INVARIANT_FAIL)
self.seqn_with_info(assert_local_state_invariants, "Assert local state invariants before call", res)
# We check that the invariant tracks all allocation by doing a leak check.
if ctx.program.config.has_option(names.CONFIG_ALLOCATION):
self.allocation_translator.send_leak_check(node, res, ctx, pos)
send_fail_name = ctx.new_local_var_name('send_fail')
send_fail = self.viper_ast.LocalVarDecl(send_fail_name, self.viper_ast.Bool)
ctx.new_local_vars.append(send_fail)
fail_cond = send_fail.localVar()
if node.type:
ret_name = ctx.new_local_var_name('raw_ret')
ret_type = self.type_translator.translate(node.type, ctx)
ret_var = self.viper_ast.LocalVarDecl(ret_name, ret_type, pos)
ctx.new_local_vars.append(ret_var)
return_value = ret_var.localVar()
type_ass = self.type_translator.type_assumptions(return_value, node.type, ctx)
res.extend(self.viper_ast.Inhale(ass) for ass in type_ass)
else:
return_value = None
call_failed = helpers.call_failed(self.viper_ast, to, pos)
self.fail_if(fail_cond, [call_failed], res, ctx, pos)
if isinstance(node, ast.ReceiverCall):
# If it is a receiver call and the receiver is null the transaction will revert.
self.fail_if(self.viper_ast.EqCmp(to, self.viper_ast.IntLit(0, pos), pos), [call_failed], res, ctx, pos)
with ctx.inline_scope(None):
# Create pre_state for function call
def inlined_pre_state(name: str) -> str:
return ctx.inline_prefix + mangled.pre_state_var_name(name)
old_state_for_postconditions = self.state_translator.state(inlined_pre_state, ctx)
with ctx.inline_scope(None):
# Create needed states to verify inter contract invariants
def inlined_pre_state(name: str) -> str:
return ctx.inline_prefix + mangled.pre_state_var_name(name)
old_state_for_inter_contract_invariant_during = self.state_translator.state(inlined_pre_state, ctx)
def inlined_old_state(name: str) -> str:
return ctx.inline_prefix + mangled.old_state_var_name(name)
curr_state_for_inter_contract_invariant_during = self.state_translator.state(inlined_old_state, ctx)
with ctx.inline_scope(None):
# Create needed states to verify inter contract invariants
def inlined_pre_state(name: str) -> str:
return ctx.inline_prefix + mangled.pre_state_var_name(name)
old_state_for_inter_contract_invariant_after = self.state_translator.state(inlined_pre_state, ctx)
def inlined_old_state(name: str) -> str:
return ctx.inline_prefix + mangled.old_state_var_name(name)
curr_state_for_inter_contract_invariant_after = self.state_translator.state(inlined_old_state, ctx)
known_interface_ref = []
if modifying:
# Collect known interface references
self_type = ctx.program.fields.type
for member_name, member_type in self_type.member_types.items():
viper_type = self.type_translator.translate(member_type, ctx)
if isinstance(member_type, types.InterfaceType):
get = helpers.struct_get(self.viper_ast, ctx.self_var.local_var(ctx), member_name,
viper_type, self_type)
known_interface_ref.append((member_type.name, get))
for var in chain(ctx.locals.values(), ctx.args.values()):
assert isinstance(var, TranslatedVar)
if isinstance(var.type, types.InterfaceType):
known_interface_ref.append((var.type.name, var.local_var(ctx)))
# Undo havocing of contract state
self.state_translator.copy_state(ctx.current_old_state, ctx.current_state, res, ctx,
unless=lambda n: n == mangled.SELF)
for val in chain(state_for_performs.values(),
old_state_for_postconditions.values(),
old_state_for_inter_contract_invariant_during.values(),
curr_state_for_inter_contract_invariant_during.values(),
old_state_for_inter_contract_invariant_after.values(),
curr_state_for_inter_contract_invariant_after.values()):
ctx.new_local_vars.append(val.var_decl(ctx, pos))
# Copy state
self.state_translator.copy_state(ctx.current_state, ctx.current_old_state, res, ctx)
if modifying:
# Prepare old state for the postconditions of the external call
self.state_translator.copy_state(ctx.current_state, old_state_for_postconditions, res, ctx)
# Havoc state
self.state_translator.havoc_state(ctx.current_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.assume_own_resources_stayed_constant(res, ctx, pos)
# Prepare old state for inter contract invariants
assume_caller_private_without_receiver = []
self.assume_contract_state(known_interface_ref, assume_caller_private_without_receiver, ctx, to)
self.seqn_with_info(assume_caller_private_without_receiver, "Assume caller private for old state", res)
caller_address = ctx.self_address or helpers.self_address(self.viper_ast)
self.implicit_resource_caller_private_expressions(interface, to, caller_address, res, ctx)
res.extend(stmt for performs_as_stmts in performs_as_stmts_generators for stmt in performs_as_stmts(0))
self.state_translator.copy_state(ctx.current_state, old_state_for_inter_contract_invariant_during, res, ctx)
# Assume caller private and create new contract state
self.state_translator.copy_state(ctx.current_state, ctx.current_old_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.state_translator.havoc_state(ctx.current_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.assume_own_resources_stayed_constant(res, ctx, pos)
self.seqn_with_info(assume_caller_private_without_receiver, "Assume caller private", res)
self.implicit_resource_caller_private_expressions(interface, to, caller_address, res, ctx)
res.extend(stmt for performs_as_stmts in performs_as_stmts_generators for stmt in performs_as_stmts(1))
self.state_translator.copy_state(ctx.current_state, ctx.current_old_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.state_translator.havoc_state(ctx.current_state, res, ctx)
############################################################################################################
# We did not yet make any assumptions about the self state. #
# #
# The contract state (which models all self states of other contracts) is at a point where anything could #
# have happened, but it is before the receiver of the external call has made any re-entrant call to self. #
############################################################################################################
res.extend(stmt for performs_as_stmts in performs_as_stmts_generators for stmt in performs_as_stmts(-1))
type_ass = self.type_translator.type_assumptions(self_var, ctx.self_type, ctx)
assume_type_ass = [self.viper_ast.Inhale(inv) for inv in type_ass]
self.seqn_with_info(assume_type_ass, "Assume type assumptions", res)
assume_invs = []
for inv in ctx.unchecked_invariants():
assume_invs.append(self.viper_ast.Inhale(inv))
assume_invs.extend(assume_invariants(lambda c: c.current_program.local_state_invariants))
self.seqn_with_info(assume_invs, "Assume local state invariants", res)
# Assume transitive postconditions
assume_transitive_posts = []
self.assume_contract_state(known_interface_ref, assume_transitive_posts, ctx, skip_caller_private=True)
for post in ctx.unchecked_transitive_postconditions():
assume_transitive_posts.append(self.viper_ast.Inhale(post))
for post in ctx.program.transitive_postconditions:
post_expr = self.spec_translator.translate_pre_or_postcondition(post, assume_transitive_posts, ctx)
ppos = self.to_position(post, ctx, rules.INHALE_POSTCONDITION_FAIL)
assume_transitive_posts.append(self.viper_ast.Inhale(post_expr, ppos))
self.seqn_with_info(assume_transitive_posts, "Assume transitive postconditions", res)
no_reentrant_name = ctx.new_local_var_name('no_reentrant_call')
no_reentrant = self.viper_ast.LocalVarDecl(no_reentrant_name, self.viper_ast.Bool)
ctx.new_local_vars.append(no_reentrant)
no_reentrant_cond = no_reentrant.localVar()
# If there were no reentrant calls, reset the contract state.
use_zero_reentrant_call_state = []
self.state_translator.copy_state(ctx.current_old_state, ctx.current_state,
use_zero_reentrant_call_state, ctx)
res.extend(helpers.flattened_conditional(self.viper_ast, no_reentrant_cond,
use_zero_reentrant_call_state, []))
############################################################################################################
# At this point, we have a self state with all the assumptions of a self state in a public state. #
# This self state corresponds to the last state of self after any (zero or more) re-entrant calls. #
# #
# The contract state is at this point also at the public state after the last re-entrant call to self. #
# Due to re-entrant calls, any caller private expression might have gotten modified. But we can assume #
# that they are only modified by self and only in such a way as described in the #
# transitive postconditions. #
############################################################################################################
# Assume caller private in a new contract state
self.state_translator.copy_state(ctx.current_state, ctx.current_old_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.state_translator.havoc_state(ctx.current_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.assume_own_resources_stayed_constant(res, ctx, pos)
assume_caller_private = []
self.assume_contract_state(known_interface_ref, assume_caller_private, ctx)
self.seqn_with_info(assume_caller_private, "Assume caller private", res)
############################################################################################################
# Since no more re-entrant calls can happen, the self state does not change anymore. #
# #
# The contract state is at a point where the last call, which lead to a re-entrant call to self, returned. #
# We can assume all caller private expressions of self stayed constant, since the contract state above. #
# We can only assume that variables captured with a caller private expression did not change, since #
# any other contract might got called which could change everything except caller private expressions. #
############################################################################################################
# Store the states to assert the inter contract invariants during the call
self.state_translator.copy_state(ctx.current_state, curr_state_for_inter_contract_invariant_during,
res, ctx)
# Assume caller private in a new contract state
self.state_translator.copy_state(ctx.current_state, ctx.current_old_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.state_translator.havoc_state(ctx.current_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.assume_own_resources_stayed_constant(res, ctx, pos)
self.seqn_with_info(assume_caller_private_without_receiver, "Assume caller private", res)
self.implicit_resource_caller_private_expressions(interface, to, caller_address, res, ctx)
res.extend(stmt for performs_as_stmts in performs_as_stmts_generators for stmt in performs_as_stmts(2))
############################################################################################################
# The contract state is at the point where the external call returns. Since the last modeled public state, #
# any non-caller-private expression might have changed but also the caller private #
# expressions of the receiver. Therefore, we can only assume that all but the receiver's caller private #
# expressions stayed constant. #
############################################################################################################
# Store the states to assert the inter contract invariants after the call
self.state_translator.copy_state(ctx.current_state, curr_state_for_inter_contract_invariant_after, res, ctx)
self.state_translator.copy_state(ctx.current_old_state, old_state_for_inter_contract_invariant_after,
res, ctx)
# Assume caller private in a new contract state
self.state_translator.copy_state(ctx.current_state, ctx.current_old_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.state_translator.havoc_state(ctx.current_state, res, ctx,
unless=lambda n: n == mangled.SELF)
self.assume_own_resources_stayed_constant(res, ctx, pos)
self.seqn_with_info(assume_caller_private_without_receiver, "Assume caller private", res)
self.implicit_resource_caller_private_expressions(interface, to, caller_address, res, ctx)
############################################################################################################
# The contract is at the end of the external call, only changes to the caller private expressions of the #
# receiver of the external call could have happened. #
# This state models the same state as the previous. But, we must not assert the inter contract invariants #
# in the state where we assumed the postcondition. #
############################################################################################################
# Restore old state for postcondition
self.state_translator.copy_state(old_state_for_postconditions, ctx.current_old_state, res,
ctx, unless=lambda n: n == mangled.SELF)
# Assume type assumptions for allocation maps
self.state_translator.assume_type_assumptions_for_state(
{name: state for name, state in ctx.current_state.items() if StateTranslator.is_allocation(name)},
"State after call", res, ctx)
success = self.viper_ast.Not(fail_cond, pos)
amount = amount or self.viper_ast.IntLit(0)
# Assume postcondition of the external call
if known:
self._assume_interface_specifications(node, interface, function, args, to, amount, success,
return_value, res, ctx)
if modifying:
# Assert inter contract invariants during call
with ctx.state_scope(curr_state_for_inter_contract_invariant_during,
old_state_for_inter_contract_invariant_during):
assert_invs = assert_invariants(lambda c: c.current_program.inter_contract_invariants,
rules.DURING_CALL_INVARIANT_FAIL)
self.seqn_with_info(assert_invs, "Assert inter contract invariants during call", res)
# Assert inter contract invariants after call
with ctx.state_scope(curr_state_for_inter_contract_invariant_after,
old_state_for_inter_contract_invariant_after):
assert_invs = assert_invariants(lambda c: c.current_program.inter_contract_invariants,
rules.DURING_CALL_INVARIANT_FAIL)
self.seqn_with_info(assert_invs, "Assert inter contract invariants after call", res)
self.state_translator.copy_state(ctx.current_state, ctx.current_old_state, res, ctx)
return success, return_value
def forget_about_all_events(self, res, ctx, pos):
# We forget about events by exhaling all permissions to the event predicates, i.e.
# for all event predicates e we do
# exhale forall arg0, arg1, ... :: perm(e(arg0, arg1, ...)) > none ==> acc(e(...), perm(e(...)))
# We use an implication with a '> none' because of a bug in Carbon (TODO: issue #171) where it isn't possible
# to exhale no permissions under a quantifier.
for event in ctx.program.events.values():
event_name = mangled.event_name(event.name)
viper_types = [self.type_translator.translate(arg, ctx) for arg in event.type.arg_types]
event_args = [self.viper_ast.LocalVarDecl(f'$arg{idx}', viper_type, pos)
for idx, viper_type in enumerate(viper_types)]
local_args = [arg.localVar() for arg in event_args]
pa = self.viper_ast.PredicateAccess(local_args, event_name, pos)
perm = self.viper_ast.CurrentPerm(pa, pos)
pap = self.viper_ast.PredicateAccessPredicate(pa, perm, pos)
none = self.viper_ast.NoPerm(pos)
impl = self.viper_ast.Implies(self.viper_ast.GtCmp(perm, none, pos), pap)
trigger = self.viper_ast.Trigger([pa], pos)
forall = self.viper_ast.Forall(event_args, [trigger], impl, pos)
res.append(self.viper_ast.Exhale(forall, pos))
    def log_all_events_zero_or_more_times(self, res, ctx, pos):
        """
        For every declared event, inhale permission to its predicate multiplied
        by a fresh non-negative integer, i.e. assume each event may have been
        logged zero or more times. The fresh argument variables are recorded in
        ``ctx.event_vars`` under the mangled event name.
        """
        for event in ctx.program.events.values():
            event_name = mangled.event_name(event.name)
            viper_types = [self.type_translator.translate(arg, ctx) for arg in event.type.arg_types]
            # Fresh local variables standing in for the (unknown) event arguments.
            event_args = [self.viper_ast.LocalVarDecl(ctx.new_local_var_name('$arg'), arg_type, pos)
                          for arg_type in viper_types]
            ctx.new_local_vars.extend(event_args)
            local_args = [arg.localVar() for arg in event_args]
            ctx.event_vars[event_name] = local_args
            # Inhale zero or more times write permission.
            # Fresh integer '$a' acts as the unknown multiplicity of the event.
            var_name = ctx.new_local_var_name('$a')
            var_decl = self.viper_ast.LocalVarDecl(var_name, self.viper_ast.Int, pos)
            ctx.new_local_vars.append(var_decl)
            var_perm_mul = var_decl.localVar()
            # Constrain the multiplicity to be non-negative: $a >= 0.
            ge_zero_cond = self.viper_ast.GeCmp(var_perm_mul, self.viper_ast.IntLit(0, pos), pos)
            assume_ge_zero = self.viper_ast.Inhale(ge_zero_cond, pos)
            # acc(event(args), $a * write): permission scaled by the multiplicity.
            pred_acc = self.viper_ast.PredicateAccess(local_args, event_name, pos)
            perm_mul = self.viper_ast.IntPermMul(var_perm_mul, self.viper_ast.FullPerm(pos), pos)
            pred_acc_pred = self.viper_ast.PredicateAccessPredicate(pred_acc, perm_mul, pos)
            log_event = self.viper_ast.Inhale(pred_acc_pred, pos)
            # Append both Inhales.
            res.extend([assume_ge_zero, log_event])
    def _assume_interface_specifications(self,
                                         node: ast.Node,
                                         interface: VyperInterface,
                                         function: VyperFunction,
                                         args: List[Expr],
                                         to: Expr,
                                         amount: Expr,
                                         succ: Expr,
                                         return_value: Optional[Expr],
                                         res: List[Stmt],
                                         ctx: Context):
        """
        Inhale the specifications of ``function`` declared on ``interface`` for
        an external call to address ``to``.

        Builds a statement list that:
          * introduces a fresh ``msg`` variable and assumes
            ``msg.sender == self`` and ``msg.value == amount``,
          * binds the passed ``args`` to the function's parameters (aligning the
            wrapped/unwrapped representation of numeric values),
          * binds the ``result`` (if any) and ``success`` variables,
          * inhales the function's postconditions and the interface's general
            postconditions.
        Unless the ``trust_casts`` option is set, the whole list is guarded by
        an ``implements(to, interface)`` check.
        """
        with ctx.interface_call_scope():
            body = []
            # Define new msg variable
            msg_name = ctx.inline_prefix + mangled.MSG
            msg_var = TranslatedVar(names.MSG, msg_name, types.MSG_TYPE, self.viper_ast)
            ctx.locals[names.MSG] = msg_var
            ctx.new_local_vars.append(msg_var.var_decl(ctx))
            # Assume msg.sender == self and msg.value == amount
            msg = msg_var.local_var(ctx)
            svytype = types.MSG_TYPE.member_types[names.MSG_SENDER]
            svitype = self.type_translator.translate(svytype, ctx)
            msg_sender = helpers.struct_get(self.viper_ast, msg, names.MSG_SENDER, svitype, types.MSG_TYPE)
            self_address = helpers.self_address(self.viper_ast)
            body.append(self.viper_ast.Inhale(self.viper_ast.EqCmp(msg_sender, self_address)))
            vvytype = types.MSG_TYPE.member_types[names.MSG_VALUE]
            vvitype = self.type_translator.translate(vvytype, ctx)
            msg_value = helpers.struct_get(self.viper_ast, msg, names.MSG_VALUE, vvitype, types.MSG_TYPE)
            body.append(self.viper_ast.Inhale(self.viper_ast.EqCmp(msg_value, amount)))
            # Add arguments to local vars, assign passed args
            for (name, var), arg in zip(function.args.items(), args):
                apos = arg.pos()
                arg_var = self._translate_var(var, ctx)
                ctx.locals[name] = arg_var
                # lhs is only used for the wrappedness checks below; the final
                # assignment re-fetches the (possibly changed) local var.
                lhs = arg_var.local_var(ctx)
                if (types.is_numeric(arg_var.type)
                        and self.arithmetic_translator.is_wrapped(arg)
                        and self.arithmetic_translator.is_unwrapped(lhs)):
                    # NOTE(review): setting is_local = False appears to switch the
                    # variable to the wrapped representation — confirm.
                    arg_var.is_local = False
                    lhs = arg_var.local_var(ctx)
                elif (types.is_numeric(arg_var.type)
                        and self.arithmetic_translator.is_unwrapped(arg)
                        and self.arithmetic_translator.is_wrapped(lhs)):
                    # Local expects a wrapped value: wrap the passed argument.
                    arg = helpers.w_wrap(self.viper_ast, arg)
                elif (not types.is_numeric(arg_var.type)
                        and self.arithmetic_translator.is_wrapped(arg)):
                    # Non-numeric locals never carry a wrapped value: unwrap.
                    arg = helpers.w_unwrap(self.viper_ast, arg)
                ctx.new_local_vars.append(arg_var.var_decl(ctx))
                body.append(self.viper_ast.LocalVarAssign(arg_var.local_var(ctx), arg, apos))
            # Add result variable
            if function.type.return_type:
                ret_name = ctx.inline_prefix + mangled.RESULT_VAR
                ret_pos = return_value.pos()
                ctx.result_var = TranslatedVar(names.RESULT, ret_name, function.type.return_type,
                                               self.viper_ast, ret_pos, is_local=False)
                ctx.new_local_vars.append(ctx.result_var.var_decl(ctx, ret_pos))
                if (types.is_numeric(function.type.return_type)
                        and self.arithmetic_translator.is_unwrapped(return_value)):
                    return_value = helpers.w_wrap(self.viper_ast, return_value)
                # NOTE(review): local_var is called with ret_pos where other call
                # sites pass ctx — possibly a latent bug; confirm against TranslatedVar.
                body.append(self.viper_ast.LocalVarAssign(ctx.result_var.local_var(ret_pos), return_value, ret_pos))
            # Add success variable
            succ_name = ctx.inline_prefix + mangled.SUCCESS_VAR
            succ_var = TranslatedVar(names.SUCCESS, succ_name, types.VYPER_BOOL, self.viper_ast, succ.pos())
            ctx.new_local_vars.append(succ_var.var_decl(ctx))
            ctx.success_var = succ_var
            body.append(self.viper_ast.LocalVarAssign(succ_var.local_var(ctx), succ, succ.pos()))
            translate = self.spec_translator.translate_pre_or_postcondition
            pos = self.to_position(node, ctx, rules.INHALE_INTERFACE_FAIL)
            # Translate the postconditions in the scope of the interface with
            # 'self' rebound to the call target address.
            with ctx.program_scope(interface):
                with ctx.self_address_scope(to):
                    postconditions = chain(function.postconditions, interface.general_postconditions)
                    exprs = [translate(post, body, ctx) for post in postconditions]
                    body.extend(self.viper_ast.Inhale(expr, pos) for expr in exprs)
            if ctx.program.config.has_option(names.CONFIG_TRUST_CASTS):
                # trust_casts: assume unconditionally that 'to' implements the interface.
                res.extend(body)
            else:
                implements = helpers.implements(self.viper_ast, to, interface.name, ctx, pos)
                res.append(self.viper_ast.If(implements, body, [], pos))
def _translate_var(self, var: VyperVar, ctx: Context) -> TranslatedVar:
pos = self.to_position(var.node, ctx)
name = mangled.local_var_name(ctx.inline_prefix, var.name)
return TranslatedVar(var.name, name, var.type, self.viper_ast, pos) | PypiClean |
/Mezzanine-6.0.0.tar.gz/Mezzanine-6.0.0/mezzanine/core/static/mezzanine/tinymce/themes/modern/theme.min.js | tinymce.ThemeManager.add("modern",function(a){function b(){function b(b){var d,e=[];if(b)return l(b.split(/[ ,]/),function(b){function c(){var c=a.selection;"bullist"==f&&c.selectorChanged("ul > li",function(a,c){for(var d,e=c.parents.length;e--&&(d=c.parents[e].nodeName,"OL"!=d&&"UL"!=d););b.active(a&&"UL"==d)}),"numlist"==f&&c.selectorChanged("ol > li",function(a,c){for(var d,e=c.parents.length;e--&&(d=c.parents[e].nodeName,"OL"!=d&&"UL"!=d););b.active(a&&"OL"==d)}),b.settings.stateSelector&&c.selectorChanged(b.settings.stateSelector,function(a){b.active(a)},!0),b.settings.disabledStateSelector&&c.selectorChanged(b.settings.disabledStateSelector,function(a){b.disabled(a)})}var f;"|"==b?d=null:k.has(b)?(b={type:b},j.toolbar_items_size&&(b.size=j.toolbar_items_size),e.push(b),d=null):(d||(d={type:"buttongroup",items:[]},e.push(d)),a.buttons[b]&&(f=b,b=a.buttons[f],"function"==typeof b&&(b=b()),b.type=b.type||"button",j.toolbar_items_size&&(b.size=j.toolbar_items_size),b=k.create(b),d.items.push(b),a.initialized?c():a.on("init",c)))}),c.push({type:"toolbar",layout:"flow",items:e}),!0}var c=[];if(tinymce.isArray(j.toolbar)){if(0===j.toolbar.length)return;tinymce.each(j.toolbar,function(a,b){j["toolbar"+(b+1)]=a}),delete j.toolbar}for(var d=1;10>d&&b(j["toolbar"+d]);d++);return c.length||j.toolbar===!1||b(j.toolbar||o),c.length?{type:"panel",layout:"stack",classes:"toolbar-grp",ariaRoot:!0,ariaRemember:!0,items:c}:void 0}function c(){function b(b){var c;return"|"==b?{text:"|"}:c=a.menuItems[b]}function c(c){var d,e,f,g,h;if(h=tinymce.makeMap((j.removed_menuitems||"").split(/[ ,]/)),j.menu?(e=j.menu[c],g=!0):e=n[c],e){d={text:e.title},f=[],l((e.items||"").split(/[ ,]/),function(a){var 
c=b(a);c&&!h[a]&&f.push(b(a))}),g||l(a.menuItems,function(a){a.context==c&&("before"==a.separator&&f.push({text:"|"}),a.prependToContext?f.unshift(a):f.push(a),"after"==a.separator&&f.push({text:"|"}))});for(var i=0;i<f.length;i++)"|"==f[i].text&&(0===i||i==f.length-1)&&f.splice(i,1);if(d.menu=f,!d.menu.length)return null}return d}var d,e=[],f=[];if(j.menu)for(d in j.menu)f.push(d);else for(d in n)f.push(d);for(var g="string"==typeof j.menubar?j.menubar.split(/[ ,]/):f,h=0;h<g.length;h++){var i=g[h];i=c(i),i&&e.push(i)}return e}function d(b){function c(a){var c=b.find(a)[0];c&&c.focus(!0)}a.shortcuts.add("Alt+F9","",function(){c("menubar")}),a.shortcuts.add("Alt+F10","",function(){c("toolbar")}),a.shortcuts.add("Alt+F11","",function(){c("elementpath")}),b.on("cancel",function(){a.focus()})}function e(b,c){function d(a){return{width:a.clientWidth,height:a.clientHeight}}var e,f,g,h;e=a.getContainer(),f=a.getContentAreaContainer().firstChild,g=d(e),h=d(f),null!==b&&(b=Math.max(j.min_width||100,b),b=Math.min(j.max_width||65535,b),m.setStyle(e,"width",b+(g.width-h.width)),m.setStyle(f,"width",b)),c=Math.max(j.min_height||100,c),c=Math.min(j.max_height||65535,c),m.setStyle(f,"height",c),a.fire("ResizeEditor")}function f(b,c){var d=a.getContentAreaContainer();i.resizeTo(d.clientWidth+b,d.clientHeight+c)}function g(e){function f(){if(n&&n.moveRel&&n.visible()&&!n._fixed){var b=a.selection.getScrollContainer(),c=a.getBody(),d=0,e=0;if(b){var f=m.getPos(c),g=m.getPos(b);d=Math.max(0,g.x-f.x),e=Math.max(0,g.y-f.y)}n.fixed(!1).moveRel(c,a.rtl?["tr-br","br-tr"]:["tl-bl","bl-tl","tr-br"]).moveBy(d,e)}}function g(){n&&(n.show(),f(),m.addClass(a.getBody(),"mce-edit-focus"))}function h(){n&&(n.hide(),m.removeClass(a.getBody(),"mce-edit-focus"))}function l(){return n?void(n.visible()||g()):(n=i.panel=k.create({type:o?"panel":"floatpanel",role:"application",classes:"tinymce 
tinymce-inline",layout:"flex",direction:"column",align:"stretch",autohide:!1,autofix:!0,fixed:!!o,border:1,items:[j.menubar===!1?null:{type:"menubar",border:"0 0 1 0",items:c()},b()]}),a.fire("BeforeRenderUI"),n.renderTo(o||document.body).reflow(),d(n),g(),a.on("nodeChange",f),a.on("activate",g),a.on("deactivate",h),void a.nodeChanged())}var n,o;return j.fixed_toolbar_container&&(o=m.select(j.fixed_toolbar_container)[0]),j.content_editable=!0,a.on("focus",function(){e.skinUiCss?tinymce.DOM.styleSheetLoader.load(e.skinUiCss,l,l):l()}),a.on("blur hide",h),a.on("remove",function(){n&&(n.remove(),n=null)}),e.skinUiCss&&tinymce.DOM.styleSheetLoader.load(e.skinUiCss),{}}function h(f){var g,h,l;return f.skinUiCss&&tinymce.DOM.loadCSS(f.skinUiCss),g=i.panel=k.create({type:"panel",role:"application",classes:"tinymce",style:"visibility: hidden",layout:"stack",border:1,items:[j.menubar===!1?null:{type:"menubar",border:"0 0 1 0",items:c()},b(),{type:"panel",name:"iframe",layout:"stack",classes:"edit-area",html:"",border:"1 0 0 0"}]}),j.resize!==!1&&(h={type:"resizehandle",direction:j.resize,onResizeStart:function(){var b=a.getContentAreaContainer().firstChild;l={width:b.clientWidth,height:b.clientHeight}},onResize:function(a){"both"==j.resize?e(l.width+a.deltaX,l.height+a.deltaY):e(null,l.height+a.deltaY)}}),j.statusbar!==!1&&g.add({type:"panel",name:"statusbar",classes:"statusbar",layout:"flow",border:"1 0 0 0",ariaRoot:!0,items:[{type:"elementpath"},h]}),j.readonly&&g.find("*").disabled(!0),a.fire("BeforeRenderUI"),g.renderBefore(f.targetNode).reflow(),j.width&&tinymce.DOM.setStyle(g.getEl(),"width",j.width),a.on("remove",function(){g.remove(),g=null}),d(g),{iframeContainer:g.find("#iframe")[0].getEl(),editorContainer:g.getEl()}}var i=this,j=a.settings,k=tinymce.ui.Factory,l=tinymce.each,m=tinymce.DOM,n={file:{title:"File",items:"newdocument"},edit:{title:"Edit",items:"undo redo | cut copy paste pastetext | 
selectall"},insert:{title:"Insert",items:"|"},view:{title:"View",items:"visualaid |"},format:{title:"Format",items:"bold italic underline strikethrough superscript subscript | formats | removeformat"},table:{title:"Table"},tools:{title:"Tools"}},o="undo redo | styleselect | bold italic | alignleft aligncenter alignright alignjustify | bullist numlist outdent indent | link image";i.renderUI=function(b){var c=j.skin!==!1?j.skin||"lightgray":!1;if(c){var d=j.skin_url;d=d?a.documentBaseURI.toAbsolute(d):tinymce.baseURL+"/skins/"+c,tinymce.Env.documentMode<=7?b.skinUiCss=d+"/skin.ie7.min.css":b.skinUiCss=d+"/skin.min.css",a.contentCSS.push(d+"/content"+(a.inline?".inline":"")+".min.css")}return a.on("ProgressState",function(a){i.throbber=i.throbber||new tinymce.ui.Throbber(i.panel.getEl("body")),a.state?i.throbber.show(a.time):i.throbber.hide()}),j.inline?g(b):h(b)},i.resizeTo=e,i.resizeBy=f}); | PypiClean |
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lang_html.py | from __future__ import absolute_import
import os
import sys
import logging
import re
import traceback
from pprint import pprint
from codeintel2.common import *
from codeintel2.langintel import LangIntel
from codeintel2.udl import UDLLexer, UDLBuffer, UDLCILEDriver, XMLParsingBufferMixin
from codeintel2.lang_xml import XMLLangIntel
from HTMLTreeParser import html_optional_close_tags
if _xpcom_:
from xpcom.server import UnwrapObject
#---- globals
# Language name used for codeintel registration and class attributes below.
lang = "HTML"
# Module-level logger for this language handler.
log = logging.getLogger("codeintel.html")
#---- language support
class HTMLLexer(UDLLexer):
    """UDL-based lexer for HTML; inherits all behaviour from UDLLexer and only pins the language name."""
    lang = lang
class HTMLLangIntel(XMLLangIntel):
    """Language intelligence (completion triggers, tag name logic) for HTML,
    specializing the generic XML behaviour for HTML's optional-close-tag rules
    and CSS id/class attribute completions.
    """
    lang = lang

    def trg_from_pos(self, buf, pos, implicit=True, DEBUG=False):
        """
        Retrieves a codeintel completion trigger based on the current position,
        taking into account an HTML-specific context.

        In most cases, this will simply be XML code completion. However, if the
        caret is within an "id" or "class" attribute value, a CSS anchor or
        class completions trigger will be returned.
        """
        trg = XMLLangIntel.trg_from_pos(self, buf, pos, implicit, DEBUG)
        if trg and trg.type == "attr-enum-values":
            accessor = buf.accessor
            # trg.pos-3 points back into the attribute name's style run
            # (skipping ='" ); the contiguous range yields the attribute name.
            attrName = accessor.text_range(*accessor.contiguous_style_range_from_pos(trg.pos-3))
            if attrName.lower() == "id":
                return Trigger("CSS", TRG_FORM_CPLN, "anchors",
                               pos, implicit)
            elif attrName.lower() == "class":
                return Trigger("CSS", TRG_FORM_CPLN, "class-names",
                               pos, implicit)
        return trg

    def get_valid_tagnames(self, buf, pos, withPrefix=False):
        """
        Return the sorted list of tag names valid at *pos*, or None if no
        candidates can be determined. With withPrefix=True, names are returned
        as "prefix:name" when the context node has a namespace prefix.
        """
        node = buf.xml_node_at_pos(pos)
        #print "get_valid_tagnames NODE %s:%s xmlns[%s] %r"%(buf.xml_tree.prefix(node),node.localName,node.ns,node.tag)
        handlerclass = buf.xml_tree_handler(node)
        tagnames = None
        if node is not None: # or not tree.parent(node):
            tagnames = set(handlerclass.tagnames(buf.xml_tree, node))
            # HTML optional-close tags: names valid in ancestor contexts are
            # valid here too, so union the candidates up the ancestor chain.
            while node is not None and node.localName in html_optional_close_tags:
                node = buf.xml_tree.parent(node)
                if node is not None:
                    tagnames = tagnames.union(handlerclass.tagnames(buf.xml_tree, node))
        if not tagnames and hasattr(handlerclass, "dataset"):
            # Fall back to every element type the handler's dataset knows about.
            tagnames = handlerclass.dataset.all_element_types()
        if not tagnames:
            return None
        tagnames = list(tagnames)
        tagnames.sort()
        if withPrefix and node is not None:
            prefix = buf.xml_tree.prefix(node)
            if prefix:
                return ["%s:%s" % (prefix, name) for name in tagnames]
        return tagnames

    def cpln_end_tag(self, buf, trg):
        """
        Return end-tag completions at the trigger position as a list of
        ('element', 'name>') tuples, including still-open ancestors reachable
        through HTML optional-close tags. None when no node is found.
        """
        node = buf.xml_node_at_pos(trg.pos)
        if node is None: return None
        tagName = buf.xml_tree.tagname(node)
        if not tagName:
            return []
        # here on, we're only working with HTML documents
        # NOTE(review): line/col are computed but unused below.
        line, col = buf.accessor.line_and_col_at_pos(trg.pos)
        names = [tagName]
        # if this is an optional close node, get parents until a node that
        # requires close is found
        while node is not None and node.localName in html_optional_close_tags:
            node = buf.xml_tree.parent(node)
            if node is None:
                break
            if not node.end:
                # Ancestor not yet closed: offer its end tag as well.
                names.append(buf.xml_tree.tagname(node))
                continue
        # NOTE(review): the comprehension variable shadows the outer tagName.
        return [('element',tagName+">") for tagName in names]
class HTMLBuffer(UDLBuffer, XMLParsingBufferMixin):
    """Buffer for HTML documents: a UDL multi-language buffer whose markup
    part is XML-parsed, with JavaScript and CSS as embedded sub-languages.
    """
    lang = lang
    m_lang = "HTML"          # markup sub-language
    csl_lang = "JavaScript"  # client-side scripting sub-language
    css_lang = "CSS"         # stylesheet sub-language

    # Characters that should close an autocomplete UI:
    # - wanted for XML completion: ">'\" "
    # - wanted for CSS completion: " ('\";},.>"
    # - wanted for JS completion: "~`!@#%^&*()-=+{}[]|\\;:'\",.<>?/ "
    # - dropping ':' because I think that may be a problem for XML tag
    #   completion with namespaces (not sure of that though)
    # - dropping '[' because need for "<!<|>" -> "<![CDATA[" cpln
    # - dropping '-' because causes problem with CSS and XML (bug 78312)
    # - dropping '!' because causes problem with CSS "!important" (bug 78312)
    cpln_stop_chars = "'\" ;,~`@#%^&*()=+{}]|\\,.<>?/"
class HTMLCILEDriver(UDLCILEDriver):
    """CILE (scanner) driver for HTML, delegating embedded JavaScript and CSS
    regions to their respective sub-language scanners.
    """
    lang = lang
    csl_lang = "JavaScript"
    css_lang = "CSS"
#---- registration
def register(mgr):
    """Register HTML language support (lexer, buffer, langintel and CILE
    driver classes) with the codeintel Manager *mgr*."""
    mgr.set_lang_info(lang,
                      silvercity_lexer=HTMLLexer(),
                      buf_class=HTMLBuffer,
                      langintel_class=HTMLLangIntel,
                      cile_driver_class=HTMLCILEDriver,
                      is_cpln_lang=True)
/Flask-Wizard-0.5.28.tar.gz/Flask-Wizard-0.5.28/flask_wizard/telegram.py | from __future__ import absolute_import
from __future__ import print_function
import os
import json
import requests
import base64
import sys
import random
import telepot
import uuid
import apiai
import ast
import pprint
import time
from timeit import default_timer as timer
from flask import request
from actions import *
class TelegramHandler(object):
    """
    Interface for all requests coming from Telegram.

    Parses the webhook payload, optionally resolves an intent/entities via the
    api.ai (Dialogflow v1) service, executes the configured action or canned
    reply, logs the exchange to MongoDB when available, and sends the response
    back through the Telegram bot API.
    """

    def __init__(self, bot_token, ozz_guid, actions, redis_db, mongo, log):
        """
        :param bot_token: Telegram bot API token used to send replies.
        :param ozz_guid: NLU service key; an 'api_'-prefixed value enables api.ai.
        :param actions: path of a JSON file mapping intent names to either a
                        list of canned replies or an action expression string.
        :param redis_db: redis handle (stored for use by action expressions).
        :param mongo: optional pymongo client used for request logging.
        :param log: logger handle (stored for use by action expressions).
        """
        self.redis_db = redis_db
        self.mongo = mongo
        self.log = log
        self.bot_token = bot_token
        self.update_id = 0
        # BUGFIX: default to None so `if self.api:` in responds() cannot raise
        # AttributeError when no NLU key was configured.
        self.api = None
        with open(actions, "r") as jsonFile:
            self.actions = json.load(jsonFile)
        if ozz_guid != "" and ozz_guid[:4] == 'api_':
            self.api = apiai.ApiAI(ozz_guid[4:])
        print("Telegram endpoint - /api/messages/telegram")

    def responds(self, *args, **kwargs):
        """
        Flask view handler for the Telegram webhook.

        Processes the update only if its update_id has not been seen before,
        resolves an intent (when an NLU api is configured), dispatches canned
        replies or configured actions, and falls back to echoing the message.
        Always returns the string 'Responded!'.
        """
        data = request.get_data()
        if isinstance(data, bytes):
            data = json.loads(data.decode())
        print(data)
        if int(data["update_id"]) >= self.update_id:
            self.update_id = int(data["update_id"])
            if "message" in data:
                frm = data["message"]["from"]
                # BUGFIX: non-text updates (photos, stickers, ...) carry no
                # "text" field; previously this raised a KeyError.
                message = data["message"].get("text")
                IdOfSender = frm["id"]
                if message is None:
                    return 'Responded!'
                start = timer()
                intent = None
                entities = None
                action = None
                # BUGFIX: previously undefined when no NLU api was configured,
                # making the `elif response != ""` branch raise a NameError.
                response = ""
                if self.api:
                    r = self.api.text_request()
                    r.session_id = uuid.uuid4().hex
                    r.query = message
                    res = r.getresponse()
                    res = json.loads(res.read().decode('utf-8'))
                    intent = res["result"]["action"]
                    if intent == '':
                        intent = res["result"]["metadata"]["intentName"]
                    response = res["result"]["fulfillment"]["speech"]
                    entities = res["result"]['parameters']
                if intent in self.actions:
                    if type(self.actions[intent]) == list:
                        # Canned replies: pick one at random.
                        response = random.choice(self.actions[intent])
                        self.send_message(IdOfSender, response)
                    else:
                        # Custom action: the configured expression may reference
                        # the `session` dict describing this request.
                        # NOTE(review): eval() of configured code is dangerous if
                        # the actions file can be influenced by untrusted parties.
                        session = {}
                        session['user'] = {'id': IdOfSender}
                        session['intent'] = intent
                        session['entities'] = entities
                        session['message'] = message
                        session['channel'] = 'telegram'
                        message = eval(self.actions[intent])
                        self.send_message(IdOfSender, message)
                elif response != "":
                    # NLU produced a direct reply.
                    self._log_interaction(message, intent, entities, action, response, start)
                    self.send_message(IdOfSender, response)
                else:
                    # Fall back to echoing the incoming message.
                    self._log_interaction(message, intent, entities, action, message, start)
                    self.send_message(IdOfSender, message)
        return 'Responded!'

    def _log_interaction(self, message, intent, entities, action, response, start):
        """Persist a log entry for this exchange to MongoDB, if configured; no-op otherwise."""
        if self.mongo:
            runtime = str(timer() - start)
            log_object = {"message": message, "channel": "telegram", "intent": intent,
                          "entities": entities, "action": action, "response": str(response),
                          "runtime": runtime, "time": str(time.time())}
            self.mongo.db.logs.insert_one(log_object)

    def send_message(self, id, text):
        """
        Send the message text to the Telegram chat identified by *id*.
        """
        print('Sending Mssg', text)
        if sys.version_info >= (3, 0):
            message = text
        else:
            message = text.decode('unicode_escape')
        token = self.bot_token
        bot = telepot.Bot(token)
        # BUGFIX: send the (possibly Py2-decoded) `message`; the original
        # computed it but sent the raw `text`. Identical on Python 3.
        r = bot.sendMessage(id, message)
        print(r)
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/models/entitydata.py | from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
import os.path
import shutil
from django.conf import settings
from annalist import layout
from annalist.exceptions import Annalist_Error
from annalist.identifiers import ANNAL
from annalist import util
from annalist.models.entity import Entity
class EntityData(Entity):
    """
    Entity whose data records are stored under a RecordTypeData collection
    member.  The storage hierarchy is:

        Site
          Collection
            RecordTypeData
              EntityData

    which keeps entities of different record types in separate directories.
    """

    _entitytype     = ANNAL.CURIE.EntityData
    _entitytypeid   = None
    _entityroot     = layout.TYPEDATA_ENTITY_PATH
    _entityview     = layout.TYPEDATA_ENTITY_VIEW
    _entitybase     = layout.ENTITY_BASE_REF
    _entityfile     = layout.ENTITY_DATA_FILE
    _contextbase    = layout.ENTITY_COLL_BASE_REF
    _contextref     = layout.ENTITY_CONTEXT_FILE

    def __init__(self, parent, entity_id):
        """
        Initialize a new EntityData object, without metadata.

        parent      the parent collection (RecordTypeData) from which this
                    entity is descended.
        entity_id   the local identifier (slug) for the data record.
        """
        # Default the type id to the parent collection's id when the class
        # itself does not define one.
        type_id = self._entitytypeid if self._entitytypeid else parent.get_id()
        self._entitytypeid = type_id
        super(EntityData, self).__init__(parent, entity_id)
        params = {'type_id': type_id, 'id': entity_id}
        self._paramdict     = params
        self._entityref     = layout.COLL_BASE_ENTITY_REF % params
        self._entityviewuri = parent._entityurl + (self._entityview % params)
        return

    def _migrate_filenames(self):
        """
        Return filename migration list for entity data.

        The returned filenames were used for the current entity type by
        previous versions of Annalist.  When the expected filename is not
        found, _load_values() consults this list and, on a match, renames
        the file to the current version's filename.
        """
        return [layout.ENTITY_OLD_DATA_FILE]
# End. | PypiClean |
/LstGen-0.6.2.tar.gz/LstGen-0.6.2/CHANGES.md | # Changes
## 0.6.2
* Added 2023 PAP from July
## 0.6.1
* Added 2023 PAP
## 0.6.0
* Added golang generator which supersedes go generator (thanks polderudo)
* Fixed empty if-body in python
* Added javascript example (thanks 0xCCD)
* Added 2022 PAP (thanks 0xCCD)
## 0.5.4
* Added php BigDecimal fixes (thanks tburschka)
## 0.5.3
* Added python3.9 ast module compatibility
## 0.5.2
* Added resources for year 2021 (thanks 0xCCD)
## 0.5.1
* Added fix for double types in Go (thanks knipknap)
## 0.5.0
* Added Go support (thanks knipknap for the code and Dexus for the review!)
## 0.4.3
* Fixed default value for "double" numbers
* Removed size literals (L/D) in python code
* Improved PAP naming (thanks knipknap)
## 0.4.2
* Added resources for year 2020 (thanks csarn)
* Fixed README python example (thanks csarn)
## 0.4.1
* Fixed CLI examples in README
## 0.4.0
* Added PAP for 2018 and 2019
* Fixed BMF URIs
* Added "version" cli option
## 0.3.2
* Make pypi happy
## 0.3.1
* Added support for 2017 PAP
* Fixed unary operator conversion
* Replaced Decimal.\_rescale with Decimal.quantize to
remain python3 compliant
## 0.3.0
* Added JS support
* Refactored generators for java-like languages
* Cleaned up a bit to satisfy pylint
## 0.2.0
* Refactored writers to generators
* Added basic tests
* Added BigDecimal PHP proxy class
## 0.1.1
* Fixed packaging issues for README.md and CHANGES.md
## 0.1.0
* Initial version
| PypiClean |
/FYS2130_FreqAn-0.1.5.tar.gz/FYS2130_FreqAn-0.1.5/FYS2130_FreqAn/Freq_Analysis.py | import numpy as np
import matplotlib.pyplot as plt
import time
class Freq_Analysis:
    """
    Frequency analysis of a uniformly sampled signal.

    Provides a plain FFT view (plot_Fourier) and Morlet-wavelet
    time-frequency diagrams, computed either directly in the time domain
    (wavelet_diagram, slow) or via the convolution theorem in the
    frequency domain (faster_wavelet_diagram).
    """

    def __init__(self, signal, fs):
        """
        signal : array_like -- sampled signal values
        fs     : float      -- sampling frequency [Hz]
        """
        self.signal = signal
        self.fs = fs                    # sampling frequency
        self.dt = 1 / fs                # time between sampling points
        self.N = len(signal)            # number of sampling points
        self.T = self.dt*self.N         # total sampling time
        self.t = np.linspace(0, self.T, self.N)

    def plot_Fourier(self, xlim=None, show=True):
        """
        Plot the sampled signal together with the magnitude of its FFT.

        xlim : frequency-axis limit for the FT subplot; defaults to the
               Nyquist frequency fs/2.
        show : whether to call plt.show() immediately.

        Returns (FT_freq, X_k): FFT sample frequencies and coefficients.
        """
        self.show_FT = show
        if xlim is None: xlim = self.fs/2
        self.xlim = xlim
        X_k = np.fft.fft(self.signal)                   # FT of the sampled signal
        FT_freq = np.fft.fftfreq(int(self.N), self.dt)  # matching FT frequencies
        t = np.linspace(0, self.N*self.dt, int(self.N))
        fig = plt.figure(figsize=(8, 4))
        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        ax1.plot(t, self.signal, label='Sampled signal')
        ax2.plot(FT_freq, abs(X_k), label='Fourier Transform')
        fig.suptitle('Fourier Transform', weight='bold', fontsize=20)
        ax1.set_xlabel('t [s]', weight='bold', fontsize=18)
        ax1.set_ylabel('x_n', weight='bold', fontsize=18)
        ax2.set_xlabel('f [Hz]', weight='bold', fontsize=18)
        ax2.set_ylabel('X_k', weight='bold', fontsize=18)
        # Limit the frequency axis so the spectrum is easier to read
        # (different signals have different Nyquist frequencies).
        ax2.set_xlim(-xlim, xlim)
        fig.tight_layout()
        ax1.legend(prop={'size':14}); ax2.legend(prop={'size':14})
        if show is True: plt.show()
        return FT_freq, X_k

    # Morlet wavelet
    def wavelet(self, tn, omega_a, tk, K):
        """
        Morlet wavelet centred at time tk, evaluated at time(s) tn.

        omega_a : analysis (angular) frequency
        K       : width parameter of the wavelet envelope
        """
        # Bugfix: the normalisation constant referenced an undefined global
        # 'fs'; it must use the instance sampling frequency self.fs.
        C = 0.798 * omega_a / (self.fs*K)
        w = C*(np.exp(-1j*omega_a*(tn - tk)) - np.exp(-K**2))*np.exp(-omega_a**2 * (tn - tk)**2 / (2*K)**2)
        return w

    # Wavelet transform in the time domain
    def wavelet_transform(self, x_n, omega_a, tk, K):
        """Inner product of the signal x_n with the conjugated wavelet."""
        tn = self.t.copy()
        gamma = np.sum(x_n * self.wavelet(tn, omega_a, tk, K).conjugate())
        return gamma

    # Builds the diagram by iterating over all (tk, omega_a) combinations
    def wavelet_diagram(self, omega_a, K):
        """
        Time-domain wavelet diagram.

        This works entirely in the time domain and is much slower than
        faster_wavelet_diagram; kept for reference.
        """
        x_n = self.signal.copy()
        self.tk = self.t.copy()
        self.omega_a = omega_a
        N = len(self.tk)
        M = len(omega_a)
        WT = np.zeros([N, M], dtype=np.complex128)
        for m in range(M):
            for n in range(N):
                WT[n,m] = self.wavelet_transform(x_n, self.omega_a[m], self.tk[n], K)
        return WT

    # Fourier transform of the Morlet wavelet
    def FT_wavelet(self, omega, omega_a, K):
        """Analytic Fourier transform of the Morlet wavelet."""
        w = 2 * (np.exp(-(K * (omega - omega_a)/omega_a)**2) - np.exp(-K**2) * np.exp(-(K*omega/omega_a)**2))
        return w

    # Faster algorithm using the convolution theorem in the frequency domain
    def faster_wavelet_diagram(self, omega_a, K, sample_skip=1, time_start=0, time_end=None, show=True, ani=False):
        """
        Wavelet diagram computed in the frequency domain.

        omega_a     : array of analysis (angular) frequencies
        K           : wavelet width parameter
        sample_skip : keep every sample_skip-th sample (downsampling)
        time_start,
        time_end    : restrict analysis to this time window (defaults to
                      the whole signal)
        show        : whether to call plt.show() on the resulting diagram
        ani         : animate the FT-product construction (for illustration)

        Returns (freq_mesh, tk_mesh, WT).
        """
        if time_end is None: time_end = self.T
        tk = np.linspace(0, self.N*self.dt, int(self.N))[::sample_skip]
        where_to_solve = np.logical_and(tk >= time_start, tk <= time_end)
        tk = tk[where_to_solve]
        N = len(tk)
        fs = 1 / (self.dt*sample_skip)   # effective sampling frequency after skipping
        dt = 1 / fs
        omega_a_mesh, tk_mesh = np.meshgrid(omega_a, tk, indexing='ij')
        omega_0 = np.fft.fftfreq(int(N), dt) * 2*np.pi
        x_n = self.signal.copy()[::sample_skip]
        x_n = x_n[where_to_solve]
        x_nFT = np.fft.fft(x_n)
        N = len(tk)
        M = len(omega_a)
        WT = np.zeros([M, N], dtype=np.complex128)
        if ani is True:
            x_nFTmax = np.max(abs(x_nFT))
            # Animates what happens in a WT in the frequency domain.
            for j in range(M):
                W = self.FT_wavelet(omega_0, omega_a[j], K)
                Wmax = np.max(W)
                plt.plot(omega_0, abs(x_nFT) / x_nFTmax, 'k', label='FT')
                plt.plot(omega_0, W / Wmax, label='FT-wavelet')  # normalised for illustration
                plt.plot(omega_0, W / Wmax * abs(x_nFT) / x_nFTmax, 'r', label='product')
                plt.draw()
                plt.title("Takes product of the two FT's", weight='bold', fontsize=20)
                plt.xlabel('omega [1/s]', weight='bold', fontsize=20)
                plt.ylabel('FT', weight='bold', fontsize=20)
                plt.xlim(-np.max(omega_a), np.max(omega_a))
                plt.legend()
                plt.pause(0.01)
                plt.clf()
            plt.close()
        # If plot_Fourier() was previously called with show=False, re-create
        # its (unshown) figure so it appears alongside the diagram.
        try:
            self.show_FT
        except AttributeError:
            pass
        else:
            if self.show_FT is False: self.plot_Fourier(xlim=self.xlim, show=False)
        # Compute the WT itself and store the values.
        for i in range(M):
            WT[i, :] = np.fft.ifft(x_nFT * self.FT_wavelet(omega_0, omega_a[i], K))  # convolution theorem
        freq_mesh = omega_a_mesh.copy() / (2*np.pi)
        fig = plt.figure()
        ax = fig.add_subplot()
        p = ax.contourf(tk_mesh, freq_mesh, abs(WT), levels=300, cmap='hot')
        cbar_ax = fig.colorbar(p, ax=ax, aspect=10)
        cbar_ax.set_label('Amplitude', weight='bold', fontsize=20)
        ax.set_xlabel('t [s]', weight='bold', fontsize=20); ax.set_ylabel('freq [1/s]', weight='bold', fontsize=20)
        ax.set_title(f'K = {K}', weight='bold', fontsize=20)
        fig.tight_layout()
        if show is True: plt.show()
        return freq_mesh, tk_mesh, WT
/KratosStructuralMechanicsApplication-9.4-cp310-cp310-win_amd64.whl/KratosMultiphysics/StructuralMechanicsApplication/automatic_rayleigh_parameters_computation_process.py | import KratosMultiphysics as KM
from KratosMultiphysics import eigen_solver_factory
import KratosMultiphysics.StructuralMechanicsApplication as SMA
def Factory(settings, Model):
if not isinstance(settings, KM.Parameters):
raise Exception("Expected input shall be a Parameters object, encapsulating a json string")
return AutomaticRayleighComputationProcess(Model, settings["Parameters"])
# All the processes python processes should be derived from "Process"
class AutomaticRayleighComputationProcess(KM.Process):
    """This class is used in order to compute automatically the Rayleigh damping parameters computing in first place the eigenvalues of the system
    Only the member variables listed below should be accessed directly.
    Public member variables:
    Model -- the container of the different model parts.
    settings -- Kratos parameters containing the settings.
    """
    def __init__(self, Model, settings):
        """ The default constructor of the class
        Keyword arguments:
        self -- It signifies an instance of a class.
        Model -- the container of the different model parts.
        settings -- Kratos parameters containing solver settings.
        """
        KM.Process.__init__(self)
        # Settings string in json format
        default_parameters = KM.Parameters("""
        {
            "help"                  :"This class is used in order to compute automatically the Rayleigh damping parameters computing in first place the eigenvalues of the system",
            "mesh_id"               : 0,
            "model_part_name"       : "Structure",
            "echo_level"            : 0,
            "write_on_properties"   : true,
            "damping_ratio_0"       : 0.0,
            "damping_ratio_1"       : -1.0,
            "eigen_values_vector"   : [0.0],
            "eigen_system_settings" : {
                "solver_type"       : "eigen_eigensystem"
            }
        }
        """)
        # Setting solver settings
        # If the user supplied eigen_system_settings without a solver_type,
        # fill in the default solver; otherwise adopt the whole default block.
        if settings.Has("eigen_system_settings"):
            if not settings["eigen_system_settings"].Has("solver_type"):
                settings["eigen_system_settings"].AddValue("solver_type", default_parameters["eigen_system_settings"]["solver_type"])
        else:
            settings.AddValue("eigen_system_settings", default_parameters["eigen_system_settings"])
        solver_type = settings["eigen_system_settings"]["solver_type"].GetString()
        # Expand the solver-specific defaults; this call also sets
        # self.mass_matrix_diagonal_value / self.stiffness_matrix_diagonal_value.
        eigen_system_settings = self._auxiliary_eigen_settings(solver_type)
        default_parameters["eigen_system_settings"] = eigen_system_settings["eigen_system_settings"]
        # Overwrite the default settings with user-provided parameters
        self.settings = settings
        self.settings.RecursivelyValidateAndAssignDefaults(default_parameters)
        # We define the model parts
        self.model = Model
        self.main_model_part = self.model[self.settings["model_part_name"].GetString()]
    def ExecuteBeforeSolutionLoop(self):
        """ This method is executed before starting the time loop
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # The general damping ratios
        # NOTE(review): these values are captured *before* the Properties-based
        # overrides below, so damping ratios taken from Properties never reach
        # compute_damping_coefficients_settings -- confirm this is intended.
        damping_ratio_0 = self.settings["damping_ratio_0"].GetDouble()
        damping_ratio_1 = self.settings["damping_ratio_1"].GetDouble()
        # We get the model parts which divide the problem
        current_process_info = self.main_model_part.ProcessInfo
        existing_computation = current_process_info.Has(SMA.EIGENVALUE_VECTOR)
        # Create auxiliary parameters
        compute_damping_coefficients_settings = KM.Parameters("""
        {
            "echo_level"          : 0,
            "damping_ratio_0"     : 0.0,
            "damping_ratio_1"     : -1.0,
            "eigen_values_vector" : [0.0]
        }
        """)
        # Setting custom parameters
        compute_damping_coefficients_settings["echo_level"].SetInt(self.settings["echo_level"].GetInt())
        compute_damping_coefficients_settings["damping_ratio_0"].SetDouble(damping_ratio_0)
        compute_damping_coefficients_settings["damping_ratio_1"].SetDouble(damping_ratio_1)
        # We check if the values are previously defined
        # (the first Properties entry carrying each ratio wins)
        properties = self.main_model_part.GetProperties()
        for prop in properties:
            if prop.Has(SMA.SYSTEM_DAMPING_RATIO):
                self.settings["damping_ratio_0"].SetDouble(prop.GetValue(SMA.SYSTEM_DAMPING_RATIO))
                break
        for prop in properties:
            if prop.Has(SMA.SECOND_SYSTEM_DAMPING_RATIO):
                self.settings["damping_ratio_1"].SetDouble(prop.GetValue(SMA.SECOND_SYSTEM_DAMPING_RATIO))
                break
        # We have computed already the eigen values
        current_process_info = self.main_model_part.ProcessInfo
        precomputed_eigen_values = self.settings["eigen_values_vector"].GetVector()
        if len(precomputed_eigen_values) > 1:
            compute_damping_coefficients_settings["eigen_values_vector"].SetVector(precomputed_eigen_values)
        else:
            # If not computed eigen values already
            if not existing_computation:
                KM.Logger.PrintInfo("::[MechanicalSolver]::", "EIGENVALUE_VECTOR not previously computed. Computing automatically, take care")
                eigen_linear_solver = eigen_solver_factory.ConstructSolver(self.settings["eigen_system_settings"])
                builder_and_solver = KM.ResidualBasedBlockBuilderAndSolver(eigen_linear_solver)
                eigen_scheme = SMA.EigensolverDynamicScheme()
                eigen_solver = SMA.EigensolverStrategy(self.main_model_part, eigen_scheme, builder_and_solver,
                                                       self.mass_matrix_diagonal_value,
                                                       self.stiffness_matrix_diagonal_value)
                eigen_solver.Solve()
                # Setting the variable RESET_EQUATION_IDS
                current_process_info[SMA.RESET_EQUATION_IDS] = True
            eigenvalue_vector = current_process_info.GetValue(SMA.EIGENVALUE_VECTOR)
            compute_damping_coefficients_settings["eigen_values_vector"].SetVector(eigenvalue_vector)
        # We compute the coefficients
        coefficients_vector = SMA.ComputeDampingCoefficients(compute_damping_coefficients_settings)
        # We set the values
        # (RAYLEIGH_BETA is forced to zero when a lumped mass matrix is used)
        if self.settings["write_on_properties"].GetBool():
            for prop in self.main_model_part.Properties:
                prop.SetValue(SMA.RAYLEIGH_ALPHA, coefficients_vector[0])
                if current_process_info.Has(KM.COMPUTE_LUMPED_MASS_MATRIX):
                    if current_process_info[KM.COMPUTE_LUMPED_MASS_MATRIX]:
                        prop.SetValue(SMA.RAYLEIGH_BETA, 0.0)
                    else:
                        prop.SetValue(SMA.RAYLEIGH_BETA, coefficients_vector[1])
                else:
                    prop.SetValue(SMA.RAYLEIGH_BETA, coefficients_vector[1])
        else:
            current_process_info.SetValue(SMA.RAYLEIGH_ALPHA, coefficients_vector[0])
            if current_process_info.Has(KM.COMPUTE_LUMPED_MASS_MATRIX):
                if current_process_info[KM.COMPUTE_LUMPED_MASS_MATRIX]:
                    current_process_info.SetValue(SMA.RAYLEIGH_BETA, 0.0)
                else:
                    current_process_info.SetValue(SMA.RAYLEIGH_BETA, coefficients_vector[1])
            else:
                current_process_info.SetValue(SMA.RAYLEIGH_BETA, coefficients_vector[1])
    def _auxiliary_eigen_settings(self, solver_type):
        """ This method returns the settings for the eigenvalues computations
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        if solver_type == "feast":
            eigen_system_settings = KM.Parameters("""
            {
                "eigen_system_settings" : {
                    "solver_type"           : "feast",
                    "echo_level"            : 0,
                    "tolerance"             : 1e-10,
                    "symmetric"             : true,
                    "e_min"                 : 0.0,
                    "e_max"                 : 4.0e5,
                    "number_of_eigenvalues" : 2,
                    "subspace_size"         : 15
                }
            }
            """)
            self.mass_matrix_diagonal_value = 1.0
            self.stiffness_matrix_diagonal_value = -1.0
        else:
            eigen_system_settings = KM.Parameters("""
            {
                "eigen_system_settings" : {
                    "solver_type"       : "eigen_eigensystem"
                }
            }
            """)
            eigen_system_settings["eigen_system_settings"]["solver_type"].SetString(solver_type)
            self.mass_matrix_diagonal_value = 0.0
            self.stiffness_matrix_diagonal_value = 1.0
        return eigen_system_settings
/DataProperty-1.0.1.tar.gz/DataProperty-1.0.1/dataproperty/_dataproperty.py | import typing
from decimal import Decimal
from typing import Any, Optional, cast
import typepy
from mbstrdecoder import MultiByteStrDecoder
from typepy import (
Bool,
DateTime,
Dictionary,
Infinity,
Integer,
IpAddress,
Nan,
NoneType,
NullString,
RealNumber,
StrictLevel,
String,
Typecode,
TypeConversionError,
)
from typepy.type import AbstractType
from ._align import Align
from ._align_getter import align_getter
from ._base import DataPeropertyBase
from ._common import DefaultValue
from ._function import calc_ascii_char_width, get_number_of_digit
from ._preprocessor import Preprocessor
from .typing import FloatType, StrictLevelMap, TypeHint
class DataProperty(DataPeropertyBase):
    """Holds a single value together with lazily computed display
    properties: detected type, alignment, digit counts, length and
    rendered (ASCII) character width.
    """
    __slots__ = (
        "__data",
        "__no_ansi_escape_data",
        "__align",
        "__integer_digits",
        "__additional_format_len",
        "__length",
        "__ascii_char_width",
    )
    # Candidate types tried in order during conversion; the order matters
    # (more specific types such as NoneType/Integer are tried before String).
    __type_class_list: typing.List[AbstractType] = [
        NoneType,
        Integer,
        Infinity,
        Nan,
        IpAddress,
        RealNumber,
        Bool,
        typepy.List,
        Dictionary,
        DateTime,
        NullString,
        String,
    ]
    def __init__(
        self,
        data: Any,
        preprocessor: Optional[Preprocessor] = None,
        type_hint: TypeHint = None,
        float_type: Optional[FloatType] = None,
        format_flags: Optional[int] = None,
        datetime_format_str: str = DefaultValue.DATETIME_FORMAT,
        strict_level_map: Optional[StrictLevelMap] = None,
        east_asian_ambiguous_width: int = 1,
    ) -> None:
        super().__init__(
            format_flags=format_flags,
            is_formatting_float=True,
            datetime_format_str=datetime_format_str,
            east_asian_ambiguous_width=east_asian_ambiguous_width,
        )
        # Lazily computed caches, filled on first property access.
        self.__additional_format_len: Optional[int] = None
        self.__align: Optional[Align] = None
        self.__ascii_char_width: Optional[int] = None
        self.__integer_digits: Optional[int] = None
        self.__length: Optional[int] = None
        if preprocessor is None:
            preprocessor = Preprocessor()
        data, no_ansi_escape_data = preprocessor.preprocess(data)
        self.__set_data(data, type_hint, float_type, strict_level_map)
        # Keep an ANSI-escape-stripped twin only when the data actually
        # contained escape sequences (lengths differ).
        if no_ansi_escape_data is None or len(data) == len(no_ansi_escape_data):
            self.__no_ansi_escape_data: Optional[DataProperty] = None
        else:
            self.__no_ansi_escape_data = DataProperty(no_ansi_escape_data, float_type=float_type)
    def __eq__(self, other: Any) -> bool:
        # NAN values compare equal to each other by typecode only.
        if not isinstance(other, DataProperty):
            return False
        if self.typecode != other.typecode:
            return False
        if self.typecode == Typecode.NAN:
            return True
        return self.data == other.data
    def __ne__(self, other: Any) -> bool:
        if not isinstance(other, DataProperty):
            return True
        if self.typecode != other.typecode:
            return True
        if self.typecode == Typecode.NAN:
            return False
        return self.data != other.data
    def __repr__(self) -> str:
        element_list = []
        if self.typecode == Typecode.DATETIME:
            element_list.append(f"data={str(self.data):s}")
        else:
            try:
                element_list.append("data=" + self.to_str())
            except UnicodeEncodeError:
                element_list.append(f"data={MultiByteStrDecoder(self.data).unicode_str}")
        element_list.extend(
            [
                f"type={self.typename:s}",
                f"align={self.align.align_string}",
                f"ascii_width={self.ascii_char_width:d}",
            ]
        )
        if Integer(self.length).is_type():
            element_list.append(f"length={self.length}")
        if Integer(self.integer_digits).is_type():
            element_list.append(f"int_digits={self.integer_digits}")
        if Integer(self.decimal_places).is_type():
            element_list.append(f"decimal_places={self.decimal_places}")
        if Integer(self.additional_format_len).is_type():
            element_list.append(f"extra_len={self.additional_format_len}")
        return ", ".join(element_list)
    @property
    def align(self) -> Align:
        # Alignment derived from the typecode (via the escape-stripped twin
        # when ANSI escapes are present); computed once and cached.
        if not self.__align:
            if self.is_include_ansi_escape:
                assert self.no_ansi_escape_dp
                self.__align = self.no_ansi_escape_dp.align
            else:
                self.__align = align_getter.get_align_from_typecode(self.typecode)
            assert self.__align
        return self.__align
    @property
    def decimal_places(self) -> Optional[int]:
        """
        :return:
            Decimal places if the ``data`` type either ``float`` or
            ``decimal.Decimal``. Returns ``0`` if the ``data`` type is ``int``.
            Otherwise, returns ``float("nan")``.
        :rtype: int
        """
        if self._decimal_places is None:
            self.__set_digit()
        return self._decimal_places
    @property
    def data(self) -> Any:
        """
        :return: Original data value.
        :rtype: Original data type.
        """
        return self.__data
    @property
    def is_include_ansi_escape(self) -> bool:
        # True when stripping ANSI escapes changed the length of the data.
        if self.no_ansi_escape_dp is None:
            return False
        return self.length != self.no_ansi_escape_dp.length
    @property
    def no_ansi_escape_dp(self) -> Optional["DataProperty"]:
        # DataProperty built from the data with ANSI escapes removed
        # (None when the data contained no escape sequences).
        return self.__no_ansi_escape_data
    @property
    def length(self) -> Optional[int]:
        """
        :return: Length of the ``data``.
        :rtype: int
        """
        if self.__length is None:
            self.__length = self.__get_length()
        return self.__length
    @property
    def ascii_char_width(self) -> int:
        # Rendered width in terminal columns; computed once and cached.
        if self.__ascii_char_width is None:
            self.__ascii_char_width = self.__calc_ascii_char_width()
        return self.__ascii_char_width
    @property
    def integer_digits(self) -> Optional[int]:
        """
        :return:
            Integer digits if the ``data`` type either
            ``int``/``float``/``decimal.Decimal``.
            Otherwise, returns ``None``.
        :rtype: int
        """
        if self.__integer_digits is None:
            self.__set_digit()
        return self.__integer_digits
    @property
    def additional_format_len(self) -> int:
        # Extra characters needed to render the value (e.g. a minus sign).
        if self.__additional_format_len is None:
            self.__additional_format_len = self.__get_additional_format_len()
        return self.__additional_format_len
    def get_padding_len(self, ascii_char_width: int) -> int:
        """Return the padding length needed to render the value within
        ``ascii_char_width`` columns (0 when it does not fit)."""
        if self.typecode in (Typecode.LIST, Typecode.DICTIONARY):
            unicode_str_len = DataProperty(MultiByteStrDecoder(str(self.data)).unicode_str).length
            assert unicode_str_len
            return max(
                ascii_char_width - (self.ascii_char_width - unicode_str_len),
                0,
            )
        try:
            return max(ascii_char_width - (self.ascii_char_width - cast(int, self.length)), 0)
        except TypeError:
            # length is None for non-sequence types.
            return ascii_char_width
    def to_str(self) -> str:
        """Render the data using the configured format string."""
        return self.format_str.format(self.data)
    def __get_additional_format_len(self) -> int:
        if not RealNumber(self.data, strip_ansi_escape=False).is_type():
            return 0
        format_len = 0
        if Decimal(self.data) < 0:
            # for minus character
            format_len += 1
        return format_len
    def __get_base_float_len(self) -> int:
        # Width of a real number: integer digits + decimal places (+ dot).
        assert self.integer_digits is not None
        assert self.decimal_places is not None
        if any([self.integer_digits < 0, self.decimal_places < 0]):
            raise ValueError("integer digits and decimal places must be greater or equals to zero")
        float_len = self.integer_digits + self.decimal_places
        if self.decimal_places > 0:
            # for dot
            float_len += 1
        return float_len
    def __get_length(self) -> Optional[int]:
        if self.typecode in (Typecode.DICTIONARY, Typecode.LIST, Typecode.STRING):
            return len(self.data)
        return None
    def __calc_ascii_char_width(self) -> int:
        if self.typecode == Typecode.INTEGER:
            return cast(int, self.integer_digits) + self.additional_format_len
        if self.typecode == Typecode.REAL_NUMBER:
            return self.__get_base_float_len() + self.additional_format_len
        if self.typecode == Typecode.DATETIME:
            try:
                return len(self.to_str())
            except ValueError:
                # reach to this line if the year <1900.
                # the datetime strftime() methods require year >= 1900.
                return len(str(self.data))
        if self.is_include_ansi_escape:
            assert self.no_ansi_escape_dp
            return self.no_ansi_escape_dp.ascii_char_width
        try:
            unicode_str = MultiByteStrDecoder(self.data).unicode_str
        except ValueError:
            unicode_str = self.to_str()
        return calc_ascii_char_width(unicode_str, self._east_asian_ambiguous_width)
    def __set_data(
        self,
        data: Any,
        type_hint: TypeHint,
        float_type: Optional[FloatType],
        strict_level_map: Optional[StrictLevelMap],
    ) -> None:
        if float_type is None:
            float_type = DefaultValue.FLOAT_TYPE
        if strict_level_map is None:
            strict_level_map = DefaultValue.STRICT_LEVEL_MAP
        # Fast path: an explicit type hint converts loosely, then verifies
        # strictly; fall through to the generic candidates on mismatch.
        if type_hint:
            type_obj = type_hint(
                data, strict_level=StrictLevel.MIN, float_type=float_type, strip_ansi_escape=False
            )
            self._typecode = type_obj.typecode
            self.__data = type_obj.try_convert()
            if type_hint(
                self.__data,
                strict_level=StrictLevel.MAX,
                float_type=float_type,
                strip_ansi_escape=False,
            ).is_type():
                return
        # Try each candidate type in order, with its configured strictness.
        for type_class in self.__type_class_list:
            strict_level = strict_level_map.get(
                type_class(None).typecode, strict_level_map.get("default", StrictLevel.MAX)
            )
            if self.__try_convert_type(data, type_class, strict_level, float_type):
                return
        raise TypeConversionError(
            f"failed to convert: data={data}, strict_level={strict_level_map}"
        )
    def __set_digit(self) -> None:
        # Populate the integer-digit and decimal-place caches together.
        integer_digits, decimal_places = get_number_of_digit(self.__data)
        self.__integer_digits = integer_digits
        self._decimal_places = decimal_places
    def __try_convert_type(
        self,
        data: Any,
        type_class: AbstractType,
        strict_level: int,
        float_type: Optional[FloatType],
    ) -> bool:
        # Attempt conversion with one candidate type; on success record the
        # converted value and its typecode.
        type_obj = type_class(data, strict_level, float_type=float_type, strip_ansi_escape=False)
        try:
            self.__data = type_obj.convert()
        except TypeConversionError:
            return False
        self._typecode = type_obj.typecode
        return True
/3q-0.1.6.tar.gz/3q-0.1.6/qqq/github.py | from typing import Union, Dict
import requests
from requests.auth import HTTPBasicAuth
class GitHub:
    """Thin GitHub REST v3 client authenticated with a personal access token."""

    _api_base_url = 'https://api.github.com'

    def __init__(self, username, token):
        self.username = username
        self.token = token

    def _basic_auth(self):
        # Basic-auth credentials of the account this client acts as.
        return HTTPBasicAuth(self.username, self.token)

    @classmethod
    def verify_token(cls, username: str, token: str) -> bool:
        """
        Verify a GitHub personal access token.

        :param username: The GitHub user associated with the token
        :param token: The personal access token
        :return: True when GitHub accepts the credentials
        """
        response = requests.get(
            'https://api.github.com/user',
            auth=HTTPBasicAuth(username, token),
        )
        return response.status_code == 200

    def get_user(self, username: str) -> Union[Dict, None]:
        """
        Get a GitHub user.

        :param username: The user to get from GitHub.
        :return: decoded JSON body from the GitHub API if the user exists
        """
        response = requests.get(f'{self._api_base_url}/users/{username}')
        if response.status_code != 200:
            return None
        return response.json()

    def create_repo(self, name: str) -> Union[Dict, None]:
        """
        Create a private repo on GitHub.

        :param name: The name of the repo
        :return: decoded JSON body from the GitHub API if creation succeeded
        """
        response = requests.post(
            f'{self._api_base_url}/user/repos',
            json={'name': name, 'private': True},
            auth=self._basic_auth(),
        )
        if response.status_code != 201:
            return None
        return response.json()

    def add_collaborator(self, repo_name: str, username: str, admin: bool = False) -> bool:
        """
        Add a collaborator to a GitHub repo.

        :param repo_name: The name of the repo on GitHub
        :param username: The username of the collaborator
        :param admin: Whether or not the collaborator should have admin privileges
        :return: True if the request was successful
        """
        url = f'{self._api_base_url}/repos/{self.username}/{repo_name}/collaborators/{username}'
        payload = {'permission': 'admin'} if admin else None
        response = requests.put(url, auth=self._basic_auth(), json=payload)
        return response.status_code in (201, 204)
/Dulcinea-0.11.tar.gz/Dulcinea-0.11/lib/sort.py | import re
from quixote.html import stringify
def sort(seq):
    """(seq) -> [any]

    Return 'seq' sorted.  A list argument is sorted in place and returned;
    any other sequence is first copied into a new list, which is then
    sorted and returned.

    This is not a replacement for 'list.sort()' -- keep that form when an
    existing list is deliberately sorted in place.  It replaces the
    two-step idiom

        k = dict.keys() ; k.sort()
        values = list(object.get_some_funky_sequence()) ; values.sort()

    with the clearer

        k = sort(dict.keys())
        values = sort(object.get_some_funky_sequence())
    """
    result = seq if isinstance(seq, list) else list(seq)
    result.sort()
    return result
def _sort_and_undecorate(dlist):
    """Sort a decorated [(key, value), ...] list in place and return the
    values in key order (decorate-sort-undecorate helper)."""
    dlist.sort()
    return [pair[1] for pair in dlist]
def str_sort(seq):
    """(seq) -> []

    Sort 'seq' by the stringify() of each element.
    """
    return _sort_and_undecorate([(stringify(item), item) for item in seq])
def lexical_sort(seq):
    """(seq) -> []

    Sort 'seq' case-insensitively, by the lowercased stringify() of each
    element.
    """
    decorated = [(stringify(item).lower(), item) for item in seq]
    return _sort_and_undecorate(decorated)
def attr_sort(seq, attr):
    """(seq, attr : string) -> []

    Sort 'seq' by the value of attribute 'attr' on each element.
    """
    return _sort_and_undecorate([(getattr(item, attr), item) for item in seq])
def lex_attr_sort(seq, attr):
    """(seq, attr : string) -> []

    Sort 'seq' case-insensitively, by the lowercased stringify() of the
    'attr' attribute of each element.
    """
    decorated = [(stringify(getattr(item, attr)).lower(), item) for item in seq]
    return _sort_and_undecorate(decorated)
def method_sort(seq, method):
    """(seq, method : string) -> []

    Sort 'seq' by the result of calling the named method on each element.
    """
    method_name = stringify(method)
    decorated = [(getattr(item, method_name)(), item) for item in seq]
    return _sort_and_undecorate(decorated)
def function_sort(seq, func):
    """(seq, func : function) -> []

    Sort 'seq' by the value of 'func' applied to each element.
    """
    return _sort_and_undecorate([(func(item), item) for item in seq])
# Matches a (whitespace-padded) run of digits (group 1) or a decimal
# number with a fractional part (group 2).
number_re = re.compile(r'\s*(\d+)|(\d*\.\d+)\s*')
def natural_sort(seq, strfunc=stringify):
    """(seq, strfunc : function) -> []
    Sort a list of items in a human friendly way. Strings are sorted as
    usual, except that decimal integer substrings are compared on their
    numeric value. For example,
    a < a0 < a1 < a1a < a1b < a2 < a10 < a20
    Strings can contain several number parts:
    x2-g8 < x2-y7 < x2-y08 < x8-y8
    in which case numeric fields are separated by nonnumeric characters.
    Leading spaces are ignored. This works very well for IP addresses from
    log files, for example.
    Numeric substrings with decimal points are treated as floating point.
    1.001 < 1.002 < 1.010 < 1.02 < 1.1 < 1.3
    This function was inspired by the Mac "Natural Order" utility. It
    does not match the Natural Order algorithm exactly and probably could
    use improvements.
    """
    dlist = []
    for item in seq:
        # Split each item into alternating text and numeric parts, so that
        # tuple comparison orders the numeric parts numerically.
        parts = []
        s = strfunc(item)
        while s:
            m = number_re.search(s)
            if not m:
                parts.append(s)
                break
            else:
                parts.append(s[:m.start()])
                if m.group(1):
                    # Integer substring: compare on its numeric value.
                    val = int(s[m.start(1):m.end(1)])
                else:
                    # Substring with a decimal point: compare as float.
                    val = float(s[m.start(2):m.end(2)])
                parts.append(val)
                s = s[m.end():]
        # NOTE(review): relies on Python 2 semantics -- filter() returning a
        # list and mixed str/number keys being orderable; under Python 3 this
        # raises.  Confirm before porting.
        dlist.append((filter(None, parts), item))
    return _sort_and_undecorate(dlist)
/Lasco-0.1.0.tar.gz/Lasco-0.1.0/lasco/lascocli.py | from ConfigParser import ConfigParser
from cmd import Cmd
from optparse import OptionParser
import os
import readline # pyflakes: ignore
import shlex
import sys
import traceback
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import lasco.api
from lasco.utils import repr_as_table
DEFAULT_ENCODING = 'utf-8'
class ExceptionWrapper(object):
"""A wrapper around ``lasco.api`` that catches any exception,
prints it in a nice colour and avoids leaving the command-line
client.
"""
def __init__(self, module):
self._module = module
def __getattr__(self, attr):
def wrapped(*args, **kwargs):
try:
return getattr(self._module, attr)(*args, **kwargs)
except:
print '\033[31m'
traceback.print_exc(file=sys.stdout)
print '\033[0m'
return False
orig = getattr(self._module, attr)
if not callable(orig):
return orig
return wrapped
def fix_completer(completer):
    """Decorate a readline completer so every completion match gains a
    trailing space, sparing the user from typing the separator between
    the completed command and its arguments.

    This mimics Bash completion.
    """
    def decorated(*args, **kwargs):
        matches = completer(*args, **kwargs)
        if matches is None:
            return None
        return [match + ' ' for match in matches]
    return decorated
def get_args(line):
    """Return arguments given in a command line as Unicode strings."""
    # shlex.split honours shell-style quoting; each byte-string argument is
    # then decoded (Python 2 ``unicode``) using the module default encoding.
    args = shlex.split(line)
    return [unicode(arg, DEFAULT_ENCODING) for arg in args]
class LascoCmd(Cmd):
prompt = 'lasco> '
    def __init__(self, conf_file,
                 custom_api=None,
                 custom_print=None,
                 custom_repr_as_table=None):
        """Read the configuration file, connect to the database and wire in
        the API/printing/rendering hooks (overridable for the test suite)."""
        Cmd.__init__(self)
        here = os.path.abspath(os.path.dirname(conf_file))
        self.config = ConfigParser(defaults={'here': here})
        self.config.read(conf_file)
        db_string = self.config.get('app:lasco', 'lasco.db_string')
        # NOTE(review): debug print of the database connection string --
        # consider removing, as it may expose credentials.
        print db_string
        self.engine = create_engine(db_string)
        self.session = sessionmaker(self.engine)()
        # The following customizations are here for our tests.
        if custom_api:
            self.api = custom_api
        else:
            self.api = ExceptionWrapper(lasco.api)
        if custom_print:
            self.print_ = custom_print
        else: # pragma: no coverage
            # Default printer: encode and write to stdout with a newline.
            self.print_ = lambda msg: sys.stdout.write(
                msg.encode(DEFAULT_ENCODING) + os.linesep)
        if custom_repr_as_table:
            self.repr_as_table = custom_repr_as_table
        else: # pragma: no coverage
            self.repr_as_table = repr_as_table
def confirm(self, prompt='Are you sure?'): # pragma: no coverage
if raw_input(prompt + ' [yes/no] ') == 'yes':
return True
else:
self.print_error('Action has been cancelled.')
return False
def print_success(self, msg):
self.print_('\033[0;32m=> %s\033[0m' % msg)
def print_error(self, msg): # pragma: no coverage
self.print_('\033[31m%s\033[0m' % msg)
@fix_completer
def completenames(self, text, *ignored):
return Cmd.completenames(self, text, *ignored)
@fix_completer
def _complete_gallery_name(self, text, line, begidx, endidx):
"""A completer function for all commands that require a
gallery name.
"""
args = get_args(line)
# We check the last character of the line to know whether the
# user wants to type a new argument or complete one for which
# one or more letters have been typed already.
if len(args) > 2 or (len(args) == 2 and line[-1] == ' '):
return []
names = [g.name for g in self.api.get_galleries(self.session)]
return filter(lambda n: n.startswith(text), names)
@fix_completer
def _complete_album_name(self, text, line, begidx, endidx):
"""A completer function for all commands that require a
gallery name followed by an album name.
"""
args = get_args(line)
# We check the last character of the line to know whether the
# user wants to type a new argument or complete one for which
# one or more letters have been typed already.
if len(args) > 3 or (len(args) == 3 and line[-1] == ' '):
return []
if len(args) == 1 or (len(args) == 2 and line[-1] != ' '):
# Called to complete the name of the gallery
names = [g.name for g in self.api.get_galleries(self.session)]
return filter(lambda n: n.startswith(text), names)
if len(args) == 2 or (len(args) == 3 and line[-1] != ' '):
# Called to complete the name of the album
names = [a.name for a in \
self.api.get_albums(self.session, args[1])]
return filter(lambda n: n.startswith(text), names)
def _complete_gallery_name_and_path(self, text, line, begidx, endidx):
"""A completer function for the 'album_add' command."""
args = get_args(line)
# We check the last character of the line to know whether the
# user wants to type a new argument or complete one for which
# one or more letters have been typed already.
if len(args) == 1 or (len(args) == 2 and line[-1] != ' '):
# Called to complete the name of the gallery
names = [g.name for g in self.api.get_galleries(self.session)]
# I only wanted to call the 'fix_completer()' function,
# and then I got slightly carried away... Still, these two
# lambda's on a single line, isn't that pretty?
return fix_completer(
lambda: filter(lambda n: n.startswith(text), names))()
if len(args) == 2 or (len(args) == 3 and line[-1] != ' '):
# Name of the album. Nothing to complete
return []
if len(args) == 3 or (len(args) == 4 and line[-1] != ' '):
# Title of the album. Nothing to complete.
return []
if len(args) == 4 or (len(args) == 5 and line[-1] != ' '):
# readline seems to take the slash character as a
# separator (like a space) in 'text', and thefore sets
# 'text' as the final portion of the path. For example,
# with the following line:
# action /path/to/foo<Tab>
# 'text' is 'foo', not the whole path.
# I am not sure whether this bug affects any readline
# implementation or only the one I use on OS X 10.4
# FIXME: check on FreeBSD.
if line[begidx - 1] == os.sep: # pragma: no coverage
text = line[1 + line[:begidx].rfind(' '):]
dir = os.path.dirname(text)
if not os.path.exists(dir):
return []
if dir == '/':
rest = text[len(dir):] # otherwise we eat the first letter
else:
rest = text[len(dir) + 1:]
matches = []
for candidate in os.listdir(dir):
if candidate.startswith('.'):
continue
if not candidate.startswith(rest):
continue
full_path = os.path.join(dir, candidate)
if not os.path.isdir(full_path):
continue
matches.append('%s%s' % (candidate, os.path.sep))
return matches
def do_user_list(self, __ignored_line_remainder):
users = self.api.get_users(self.session)
if users:
self.print_(
self.repr_as_table(
('Full name', 'Login'),
[u.fullname for u in users],
[u.login for u in users]))
def help_user_list(self):
self.print_('List all users.')
def do_user_add(self, line_remainder):
args = get_args(line_remainder)
if len(args) != 3:
return self.help_user_add(syntax_only=True)
self.api.add_user(self.session, *args)
self.print_success('User has been added.')
def help_user_add(self, syntax_only=False):
if not syntax_only:
self.print_('Add a new user.')
self.print_('Syntax: user_add <login> <full-name> <password>')
def do_user_remove(self, line_remainder):
args = get_args(line_remainder)
if len(args) != 1:
return self.help_user_remove(syntax_only=True)
if self.confirm():
self.api.remove_user(self.session, args[0])
self.print_success('User has been removed.')
def help_user_remove(self, syntax_only=False):
if not syntax_only:
self.print_('Remove a user.')
self.print_('Syntax: user_remove <login>')
def do_lasco_list(self, __ignored_line_remainder):
galleries = self.api.get_galleries(self.session)
if galleries:
self.print_(
self.repr_as_table(
('Name', 'Title'),
[g.name for g in galleries],
[g.title for g in galleries]))
def help_lasco_list(self):
self.print_('List all galleries.')
def do_gallery_add(self, line_remainder):
args = get_args(line_remainder)
if len(args) != 2:
return self.help_gallery_add(syntax_only=True)
self.api.add_gallery(self.session, *args)
self.print_success('Gallery has been added.')
def help_gallery_add(self, syntax_only=False):
if not syntax_only:
self.print_('Add a new gallery.')
self.print_('Syntax: gallery_add <name> <title>')
def do_gallery_remove(self, line_remainder):
args = get_args(line_remainder)
if len(args) != 1:
return self.help_gallery_remove(syntax_only=True)
if self.confirm():
self.api.remove_gallery(self.session, *args)
self.print_success('Gallery has been removed.')
def help_gallery_remove(self, syntax_only=False):
if not syntax_only:
self.print_('Remove a gallery.')
self.print_('Syntax: gallery_remove <name>')
complete_gallery_remove = _complete_gallery_name
def do_gallery_list(self, line_remainder):
args = get_args(line_remainder)
if len(args) != 1:
return self.help_gallery_list(syntax_only=True)
albums = self.api.get_albums(self.session, *args)
if albums:
self.print_(
self.repr_as_table(
('Name', 'Title'),
[a.name for a in albums],
[a.title for a in albums]))
def help_gallery_list(self, syntax_only=False):
if not syntax_only:
self.print_('List albums of a gallery.')
self.print_('Syntax: gallery_list <name>')
complete_gallery_list = _complete_gallery_name
def do_gallery_users(self, line_remainder):
args = get_args(line_remainder)
if len(args) < 1:
return self.help_gallery_users()
elif len(args) == 1:
admins = self.api.get_gallery_administrators(self.session, *args)
self.print_(
self.repr_as_table(
('Login', 'Full name'),
[a.login for a in admins],
[a.fullname for a in admins]))
else:
self.api.manage_gallery_administrators(self.session, *args)
self.print_success('Changes have been applied.')
def help_gallery_users(self, syntax_only=False):
if not syntax_only:
self.print_("Grant or revoke user's rights in a gallery.")
self.print_('Syntax: gallery_users <gallery_name> {+,-}<login>')
self.print_('If no user id is provided, this command lists all '
'administrators of this gallery.')
complete_gallery_users = _complete_gallery_name
def do_album_add(self, line_remainder):
args = get_args(line_remainder)
if len(args) != 4:
return self.help_album_add(syntax_only=True)
self.api.add_album(self.session, self.config, *args)
self.print_success('Album has been added.')
def help_album_add(self, syntax_only=False):
if not syntax_only:
self.print_('Add a new album.')
self.print_('Syntax: album_add <gallery_name> '
'<album_name> <album_title> <pictures_dir>')
complete_album_add = _complete_gallery_name_and_path
def do_album_remove(self, line_remainder):
args = get_args(line_remainder)
if len(args) != 2:
return self.help_album_remove(syntax_only=True)
if self.confirm():
self.api.remove_album(self.session, *args)
self.print_success('Album has been removed.')
def help_album_remove(self, syntax_only=False):
if not syntax_only:
self.print_('Remove an album.')
self.print_('Syntax: album_remove <gallery_name> <album_name>')
complete_album_remove = _complete_album_name
def do_album_users(self, line_remainder):
args = get_args(line_remainder)
if len(args) < 1:
return self.help_album_users()
elif len(args) == 2:
viewers = self.api.get_album_viewers(self.session, *args)
self.print_(
self.repr_as_table(
('Login', 'Full name'),
[v.login for v in viewers],
[v.fullname for v in viewers]))
else:
self.api.manage_album_viewers(self.session, *args)
self.print_success('Changes have been applied.')
def help_album_users(self, syntax_only=False):
if not syntax_only:
self.print_("Grant or revoke user's rights in an album.")
self.print_('Syntax: album_users <gallery_name> <album_name> '
'{+,-}<login>')
self.print_('If no user id is provided, this command lists all '
'viewers of this album.')
complete_album_users = _complete_album_name
def do_shell(self, __ignored_line_remainder): # pragma: no coverage
engine = self.engine # must be available in the shell below
session = self.session # (do not remove)
try:
self.print_('Shell mode. Changes must be committed manually:')
self.print_(' session.commit()')
self.print_("Type 'locals()' to know what you may play with.")
self.print_('Press Control-C to leave the shell.')
while True:
try:
self.print_(unicode(input('>>> ')))
except KeyboardInterrupt:
raise
except:
self.print_error('Exception in user code:')
traceback.print_exc(file=sys.stdout)
except KeyboardInterrupt:
self.print_('Leaving shell.')
def help_shell(self, syntax_only=False):
if not syntax_only:
self.print_('Takes you to a Python shell where you can '
'directly interact with the database connection '
'session.')
self.print_('Syntax: shell')
def do_quit(self, *__ignored_line_remainder):
return True # instruct 'postcmd()' to leave the main loop
do_q = do_quit
def postcmd(self, stop, line): # pragma: no coverage
if not line.startswith('shell'):
self.session.commit()
else:
self.session.rollback()
if stop:
self.session.close()
return stop
def main():  # pragma: no coverage
    """Entry point: parse options, check the config file, start the CLI."""
    parser = OptionParser(usage='%prog [-c FILE]')
    parser.add_option(
        "-c", "--conf", dest="conf_file", default='./Lasco.ini',
        help="use FILE as the configuration file (default is './Lasco.ini')",
        metavar="FILE")
    options, args = parser.parse_args()
    if not os.path.exists(options.conf_file):
        # Abort with a readable message rather than a traceback.
        error = 'Could not find configuration file ("%s").' % options.conf_file
        sys.exit(error)
    cmd = LascoCmd(options.conf_file)
    cmd.cmdloop()


if __name__ == '__main__':  # pragma: no coverage
    main()
import numpy as np
import pandas as pd
import scipy
import scipy.stats
# Structures for returning results
class MANOVAResult:
    """Bundle of MANOVA outputs.

    Holds the estimated coefficient matrix, the error/between/hypothesis
    SSCP matrices, and the four classical multivariate test statistics
    as produced by ``run_manova``.
    """

    def __init__(self, beta_hat, E, B, H, wilks_lambda, pillais_trace,
                 hotelling_lawley_trace, roys_largest_root):
        names = ('beta_hat', 'E', 'B', 'H', 'wilks_lambda', 'pillais_trace',
                 'hotelling_lawley_trace', 'roys_largest_root')
        values = (beta_hat, E, B, H, wilks_lambda, pillais_trace,
                  hotelling_lawley_trace, roys_largest_root)
        for name, value in zip(names, values):
            setattr(self, name, value)
class F_statistic:
    """Result of an F-distributed test: the raw statistic, its F
    transform, numerator/denominator degrees of freedom and p-value.
    """

    def __init__(self, statistic, F, df_n, df_d, p_value):
        self.p_value = p_value
        self.df_d = df_d
        self.df_n = df_n
        self.F = F
        self.statistic = statistic
class chi2_statistic:
    """Result of a chi-square-distributed test: the raw statistic, its
    chi-square transform, degrees of freedom and p-value.
    """

    def __init__(self, statistic, chi2, df, p_value):
        self.p_value = p_value
        self.df = df
        self.chi2 = chi2
        self.statistic = statistic
# Data processing functions
def create_design_matrix(df, column):
    """Build the SAS-style ``reduced-rank factor effects`` design matrix.

    The result leads with a column of ones (the intercept) followed by
    one 0/1 indicator column per observed level of ``df[column]``.
    """
    intercept = np.ones((len(df), 1))
    indicators = pd.get_dummies(df[column]).values
    return np.hstack([intercept, indicators])
def create_response_matrix(data, columns):
    """Return the response (observation) matrix as a plain NumPy array.

    Only the named observation columns belong here; group indicators
    must be left out -- they are represented by the design matrix built
    with :func:`create_design_matrix`.  The code is deliberately
    trivial: it exists to make the expected contents explicit.
    """
    return data[columns].values
# Helper tools to create contrast matrices
def create_contrast_type_iii(X):
    """Build a ``Type III`` successive-difference hypothesis matrix.

    Each row compares one group mean against the next (-1 followed by
    +1 over the group columns of the design matrix), testing whether
    any group means differ.  Whether such a test is sensible depends on
    the data and the questions being asked.
    """
    n_cols = X.shape[1]
    C = np.zeros((n_cols - 2, n_cols))
    rows = np.arange(n_cols - 2)
    C[rows, rows + 1] = -1
    C[rows, rows + 2] = 1
    return C
def orthopolynomial_contrasts(n, degree):
    """Return Legendre-polynomial contrasts on an evenly spaced grid.

    :param n: number of contrasts (predictor variables / grid points)
    :param degree: highest polynomial degree to include (must be >= 1)
    :return: an (n, degree) array whose column ``j`` holds the Legendre
        polynomial of degree ``j + 1`` evaluated on ``n`` points evenly
        spaced over [-1, 1].
    :raises ValueError: if ``degree`` is less than 1.
    """
    # BUG FIX: degree < 1 previously crashed with an opaque IndexError
    # on ``M[0, :]``; reject it explicitly.
    if degree < 1:
        raise ValueError("degree must be at least 1")
    x = np.linspace(-1, 1, n)
    M = np.empty((degree, n))
    M[0, :] = x
    if degree > 1:
        M[1, :] = (3 * x ** 2 - 1) / 2
    # Bonnet's recursion: (i+1) P_{i+1} = (2i+1) x P_i - i P_{i-1},
    # where row i of M holds the polynomial of degree i + 1.
    for i in range(2, degree):
        M[i, :] = ((2 * i + 1) * x * M[i - 1, :] - i * M[i - 2, :]) / (i + 1)
    return M.T
# Statistical tests
def run_manova(X, Y, C, M, alpha=0.05):
    """
    X: model (column of ones followed by ``dummies'' for groups)
    Y: data (rows must match X)
    C: contrast across variables
    M: contrast across groups
    alpha: retained for API compatibility; not used in the computation.

    Calculations here follow the math given in:

    https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.3/statug/statug_introreg_sect038.htm

    We mostly follow the variable naming convention in SAS;
    however, where we use `C` for the contrast matrix across
    variables, the SAS documentation uses `L`. There may be other
    differences.

    We test the null hypothesis `C @ beta @ M = 0`.

    That is, with whatever contrasts you supply in the C and M
    matrices, we test the assumption that there is no
    significant variation.

    If the resulting p-value is less than your choice of alpha,
    you should reject the null hypothesis and conclude that
    there is variation. But you may need to resort to other
    tests to determine what is varying. That might include
    different choices of contrasts, univariate tests, or other
    techniques entirely.
    """
    # Least-squares coefficient estimate; pinv handles the rank
    # deficiency of the factor-effects parameterization.
    beta_hat = np.linalg.pinv(X.T @ X) @ X.T @ Y

    # E, the error / within-group SSCP matrix (AKA ``W'')
    E = M.T @ (
        Y.T @ Y
        -
        Y.T @ X @ np.linalg.pinv(X.T @ X) @ X.T @ Y
    ) @ M

    # B, the between-group SSCP matrix
    B = (C @ beta_hat).T \
        @ np.linalg.inv(C @ np.linalg.pinv(X.T @ X) @ C.T) \
        @ C @ beta_hat

    # H, the hypothesis SSCP matrix
    H = M.T @ B @ M

    n, p = Y.shape
    # g: number of groups -- every design column after the intercept.
    g = X.shape[1] - 1
    # q: hypothesis degrees of freedom, the rank of L(X'X)^- L'.
    q = np.linalg.matrix_rank(C @ np.linalg.pinv(X.T @ X) @ C.T)

    # The four classical multivariate test statistics.
    wl = wilks_lambda(E, H, n, p, g, q)
    pt = pillais_trace(E, H, n, p, g, q)
    hlt = hotelling_lawley_trace(E, H, n, p, g, q)
    rlr = roys_largest_root(E, H, n, p, g)

    return MANOVAResult(beta_hat, E, B, H, wl, pt, hlt, rlr)
def perform_box_m_test(X, Y):
    """
    Compute Box's M test for the homogeneity of covariance matrices.

    Parameters:
    X (numpy array): A 2D numpy array representing the model matrix (including a leading column of ones and columns of dummy variables for group inclusion).
    Y (numpy array): A 2D numpy array representing the observations.

    Returns a chi2_statistic object: M is the raw statistic, chi2 is
    the small-sample-corrected statistic C = (1 - u) * M, which is
    approximately chi-square with df degrees of freedom.
    """
    num_groups = X.shape[1] - 1
    num_variables = Y.shape[1]
    # Per-group sample sizes and row subsets, read off the dummy columns.
    num_observations = [np.sum(X[:, i+1]) for i in range(num_groups)]
    groups = [Y[X[:, i+1].astype(bool)] for i in range(num_groups)]
    # NOTE(review): 'means' is computed but never used below.
    means = [np.atleast_2d(np.mean(group, axis=0)).T for group in groups]
    covariances = [np.cov(group, rowvar=False) for group in groups]
    pooled_covariance = calculate_pooled_covariance_matrix(X, Y)

    # Accumulate the correction factor u and the log-determinant sum M.
    u = 0
    M = 0
    for n_i, cov_i in zip(num_observations, covariances):
        u += 1 / (n_i - 1)
        M += (n_i - 1) * np.log(np.linalg.det(cov_i))
    u = (u - (1 / (sum(num_observations) - num_groups))) * (
        (2 * num_variables**2 + 3 * num_variables - 1)
        /
        (6 * (num_variables + 1) * (num_groups - 1))
    )
    M = (sum(num_observations) - num_groups) * np.log(np.linalg.det(pooled_covariance)) - M
    C = (1 - u) * M

    # Degrees of freedom of the chi-square approximation.
    nu = 0.5 * num_variables * (num_variables + 1) * (num_groups - 1)
    p_value = 1 - scipy.stats.chi2.cdf(C, nu)
    return chi2_statistic(M, C, nu, p_value)
def mauchly(X, Y):
    """
    Mauchly's test of sphericity.

    X: model (column of ones followed by ``dummies'' for groups)
    Y: data (rows must match X)

    Returns a chi2_statistic: the W statistic, its chi-square
    transform, degrees of freedom and p-value.
    """
    n = Y.shape[1]
    degree = n - 1
    # Polynomial contrasts across the n within-subject measures.
    # NOTE(review): these contrasts are not orthonormalised, and the
    # trace divisor below uses (k - 1) where the classical W uses the
    # dimension of M'SM -- confirm both against the intended reference.
    M = orthopolynomial_contrasts(n, degree)
    S_p = calculate_pooled_covariance_matrix(X, Y)

    k = M.shape[1]
    lsl_matrix = M.T @ S_p @ M
    determinant = np.linalg.det(lsl_matrix)
    trace = np.trace(lsl_matrix)
    # W = |M'SM| / (tr(M'SM) / (k - 1)) ** (k - 1)
    w_stat = determinant / ((1 / (k - 1)) * trace) ** (k - 1)

    # Calculate the transformed W statistic that is chi-square distributed
    n, _ = X.shape
    n1 = n - 1
    g = 1 - (2 * k ** 2 + k + 2) / (6 * k * n1)
    transformed_w_stat = -n1 * g * np.log(w_stat)

    df = k * (k + 1) / 2 - 1
    p_value = 1 - scipy.stats.chi2.cdf(transformed_w_stat, df)
    return(chi2_statistic(w_stat, transformed_w_stat, df, p_value))
# Multivariate test statistics
def wilks_lambda(E, H, n, p, g, q):
    """
    Wilks' lambda and its F approximation.

    n: number of observations (rows)
    p: number of variables (columns in Y)
    g: number of groups (columns in X excluding the column of leading ones)
    q: the rank of (C @ np.linalg.pinv(X.T @ X) @ C.T)

    The calculation is ``exact'' in the case of g = 3 and n >= 1;
    otherwise, it's Rao's approximation as given in the SAS
    documentation referenced in run_manova.
    """
    wilks_lambda = np.linalg.det(E) / np.linalg.det(H + E)
    # Floor the statistic so the log/power transforms below stay finite
    # when H + E is (numerically) singular.
    if wilks_lambda < 1e-15:
        wilks_lambda = 1e-15
    # ``Exact'' calculation in limited circumstances: three groups
    # and at least one dependent variable. Recall that X leads
    # with a column of ones, so a three-group analysis will have
    # four columns in X.
    if g == 3 and n >= 1:
        F = ((n - p - 2) / p) * ((1 - np.sqrt(wilks_lambda)) / np.sqrt(wilks_lambda))
        df_n = 2 * p
        df_d = 2 * (n - p - 2)
    else:
        # Rao's F approximation, in SAS notation (p, s, m, n are
        # deliberately redefined locally from here on).
        # NOTE(review): SAS defines v as the error degrees of freedom
        # (n - rank(X)); the triangular-number formula below looks
        # suspect -- confirm against the SAS documentation.
        v = n*(n+1)/2
        p = np.linalg.matrix_rank(H+E)
        s = min(p, q)
        m = (abs(p-q)-1)/2
        n = (v-p-1)/2
        r = v - (p-q+1)/2
        u = (p*q-2)/4
        if p**2 + q**2 - 5 > 0:
            # BUG FIX: the original used '^' (bitwise XOR) where
            # exponentiation '**' was intended.
            t = np.sqrt((p**2 * q**2 - 4) / (p**2 + q**2 - 5))
        else:
            t = 1
        # BUG FIX: parenthesise the p*q divisor (the original divided
        # by p and then *multiplied* by q), and use the SAS degrees of
        # freedom: numerator p*q, denominator r*t - 2*u.
        F = ( (1 - wilks_lambda**(1/t)) / wilks_lambda**(1/t) ) * ( (r*t - 2*u) / (p*q) )
        df_n = p*q
        df_d = r*t - 2*u
    p_value = scipy.stats.f.sf(F, df_n, df_d)
    return F_statistic(wilks_lambda, F, df_n, df_d, p_value)
def pillais_trace(E, H, n, p, g, q):
    """
    Pillai's trace statistic and its F approximation (SAS formulas).

    n: number of observations (rows)
    p: number of variables (columns in Y)
    g: number of groups (columns in X excluding the column of leading ones)
    q: the rank of (C @ np.linalg.pinv(X.T @ X) @ C.T)
    """
    # V = tr(H (H + E)^-1)
    V = np.trace(H @ np.linalg.inv(H+E) )
    s = g - 1
    err_dof = n - q - 1
    # p, s, m, n are redefined locally following the SAS notation; the
    # parameters n and p are shadowed from here on.
    p = np.linalg.matrix_rank(H+E)
    s = min(p, q)
    m = ( np.abs(p - q) - 1) / 2
    n = (err_dof - p - 1) / 2
    df_n = s * (2*m + s + 1)
    df_d = s * (2*n + s + 1)
    F = ( (2*n + s + 1) / (2*m + s + 1) ) * (V / (s-V))
    p_value = scipy.stats.f.sf(F, df_n, df_d)
    return F_statistic(V, F, df_n, df_d, p_value)
def hotelling_lawley_trace(E, H, n, p, g, q):
    """
    Hotelling-Lawley trace statistic and its F approximation.

    n: number of observations (rows)
    p: number of variables (columns in Y)
    g: number of groups (columns in X excluding the column of leading ones)
    q: the rank of (C @ np.linalg.pinv(X.T @ X) @ C.T)
    """
    # U = tr(E^-1 H)
    U = np.trace(np.linalg.inv(E) @ H)
    s = g - 1
    err_dof = n - q - 1
    # p, s, m, n are redefined locally following the SAS notation.
    p = np.linalg.matrix_rank(H+E)
    s = min(p, q)
    m = ( np.abs(p - q) - 1) / 2
    n = (err_dof - p - 1) / 2
    # BUG FIX: SAS gives the numerator df as s*(2m + s + 1); the
    # original had a literal 2 in place of s.
    df_n = s * (2*m + s + 1)
    # NOTE: The following calculation is what is specified in the
    # SAS documentation.  The result has been observed not to match
    # calculations done with SAS exactly; the F statistic differs
    # accordingly as well.
    df_d = 2 * (s*n + 1)
    F = ( (2 * (s*n + 1)) * U) / (s**2 * (2*m + s + 1) )
    p_value = scipy.stats.f.sf(F, df_n, df_d)
    return F_statistic(U, F, df_n, df_d, p_value)
def roys_largest_root(E, H, n, p, g):
    """
    Roy's largest root and its F upper bound.

    n: number of observations (rows)
    p: number of variables (columns in Y)
    g: number of groups (columns in X excluding the column of leading ones)
    """
    # Largest eigenvalue of E^-1 H; np.real guards against tiny
    # imaginary components from the non-symmetric eigensolve.
    largest_root = np.max(np.real(np.linalg.eigvals(np.linalg.inv(E) @ H)))
    s = g - 1
    df_n = p
    df_d = n - p - s + 1
    F = largest_root * (n - p - s + 1) / p
    # BUG FIX: the p-value must be computed from the F transform, not
    # from the raw eigenvalue.  (Roy's F is an upper bound, so the
    # resulting p-value is a lower bound.)
    p_value = scipy.stats.f.sf(F, df_n, df_d)
    return F_statistic(largest_root, F, df_n, df_d, p_value)
# Post-hoc tests
def greenhouse_geisser_correction(Y, M):
    """Greenhouse-Geisser sphericity correction factor (epsilon).

    Computes epsilon = (sum lambda_i)^2 / ((k - 1) * sum lambda_i^2)
    from the eigenvalues of M' S M, where S is the sample covariance of
    the (column-centred) observations Y and k is the number of rows of
    the contrast matrix M.
    """
    centred = Y - Y.mean(axis=0)
    sample_cov = np.cov(centred, rowvar=False)
    transformed = M.T @ sample_cov @ M
    eig = np.linalg.eigvalsh(transformed)
    k_minus_one = M.shape[0] - 1
    return (eig.sum() ** 2) / (k_minus_one * (eig ** 2).sum())
def tukey_test():
    # TODO: placeholder -- Tukey's HSD post-hoc test is not implemented.
    pass
def bonferroni_correction():
    # TODO: placeholder -- Bonferroni p-value adjustment is not implemented.
    pass
# Utility functions
def calculate_pooled_covariance_matrix(X, Y):
    """Pooled within-group covariance matrix.

    :param X: design matrix -- a leading column of ones followed by one
        0/1 indicator column per group.
    :param Y: (n x p) observation matrix; rows must match X.
    :return: the (p x p) pooled covariance
        ``sum_i (n_i - 1) S_i / (N - g)`` where ``S_i`` is the sample
        covariance of group ``i`` and ``g`` the number of groups.
    """
    n, r = X.shape
    p = Y.shape[1]
    S_p = np.zeros((p, p))
    num_groups = r - 1  # every column after the intercept is a group
    # BUG FIX: the original iterated over np.unique(X[:, 1:]) -- i.e.
    # the indicator *values* 0 and 1 -- so it pooled covariances of
    # meaningless row subsets.  Iterate over the indicator columns.
    for j in range(1, r):
        members = X[:, j].astype(bool)
        Y_group = Y[members, :]
        n_group = Y_group.shape[0]
        S_p += (n_group - 1) * np.cov(Y_group, rowvar=False, ddof=1)
    # Pooled degrees of freedom N - g, consistent with the Box's M and
    # Mauchly computations that consume this matrix.  (The original
    # divided by n - r = N - g - 1.)
    S_p /= (n - num_groups)
    return S_p
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Alternatives.py | from Symbol import *
import traceback
import gtk
from Phrase import *
from Word import make_word
import Parser
class GlypherBox(GlypherPhrase) :
    """A phrase drawn inside a coloured box, positioned either at a
    fixed anchor, relative to an entity it is attached to, or at the
    caret.
    """
    colour = None
    anchor = None
    attached_to = None
    caret = None

    def __init__(self, phrase, colour = (0.9, 0.8, 0.6), anchor = None,
                 attached_to = None, caret = None, global_offset=(0,0)) :
        GlypherPhrase.__init__(self, parent=None)
        self.mes.append('box')
        self.adopt(phrase)
        self.colour = colour
        self.global_offset = global_offset
        if anchor is not None :
            self.move_to(*anchor)
            # BUG FIX: removed a leftover debug check here that compared
            # the bbox against an undefined name 'a' (raising NameError
            # whenever an anchor was supplied) and called quit() on
            # mismatch.
            self.anchor = anchor
        elif attached_to is not None :
            self.attached_to = attached_to
        elif caret is not None :
            self.caret = caret
        #else :
        #    raise(RuntimeError("Tried to create Box without specifying location or attached_to"))
        self.cast()
        debug_print(self.anchor)

    def cast(self) :
        """Recompute the anchor from whatever the box is tied to and
        move the box there if it has changed."""
        a = None
        if self.attached_to is not None :
            x, y = self.attached_to.get_caret_position()
            #x -= self.global_offset[0]
            #y -= self.global_offset[1]
            #x += self.attached_to.get_width()
            y += self.attached_to.get_height()
            debug_print(self.attached_to.format_me())
            a = (x+10, y)
        elif self.caret is not None :
            a = self.caret.position
            debug_print(a)
        debug_print(self.anchor)
        if a is not None and a != self.anchor:
            self.move_to(*a)
            debug_print(self.config[0].bbox)
            self.anchor = a
        return

    def draw(self, cr) :
        """Draw the background box, then the phrase contents."""
        self.cast()
        bb = self.config[0].get_bbox()
        c = self.colour
        draw.draw_box(cr, c, bb)
        debug_print(self.config[0].bbox)
        debug_print(self.anchor)
        GlypherPhrase.draw(self, cr)
class GlypherWidgetBox(GlypherBox) :
    """A GlypherBox hosting a live GTK widget (via GlypherWidget)."""
    gw = None
    widget = None

    def destroy(self) :
        # Detach from the caret, return focus, and drop the GTK widget
        # from its container.  NOTE: 'del self' only removes the local
        # binding; the object itself is reclaimed by refcounting.
        self.caret.boxes.remove(self)
        self.caret.return_focus()
        self.widget.get_parent().remove(self.widget)
        del self

    def __init__(self, widget, widget_parent, caret = None, attached_to = None, box_colour = (0.9, 0.8, 0.6)) :
        self.widget = widget
        self.caret = caret
        self.gw = GlypherWidget(None, widget, widget_parent, self,
                                caret.glypher.position)
        # Tint the widget background to a washed-out version of the
        # box colour (80% of the way towards white).
        faded = map(lambda c: 1-(1-c)*0.2, box_colour)
        self.gw.ebox.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(*faded))
        GlypherBox.__init__(self, self.gw, caret=caret, attached_to=attached_to,
                            colour=box_colour, global_offset=caret.position)
        self.mes.append('widget_box')
        self.widget.grab_focus()
class GlypherLabelBox(GlypherWidgetBox) :
    """A widget box containing a single word-wrapped gtk.Label."""
    labl = None

    def __init__(self, text, widget_parent, caret = None, attached_to = None, box_colour = (0.9, 0.8, 0.6)) :
        self.labl = gtk.Label(text)
        self.labl.set_line_wrap(True)
        # Fixed 200px wrap width; height follows the text.
        self.labl.set_size_request(200, -1)
        self.labl.set_alignment(1.0, 1.0)
        GlypherWidgetBox.__init__(self, self.labl, widget_parent, caret=caret, attached_to=attached_to, box_colour=box_colour)
class GlypherSymbolShortener(GlypherWidgetBox) :
    """A small two-field form for binding a trigger text to a symbol,
    stored in the global g.combinations map.
    """
    sym_entry = None

    def __init__(self, widget_parent, caret, box_colour = (0.9, 0.8, 0.6)) :
        hbox = gtk.HBox(False, 4)
        hbox.pack_start(gtk.Label("Symbol"), False)
        sym_entry = gtk.Entry(); sym_entry.set_size_request(30, -1)
        self.sym_entry = sym_entry
        hbox.pack_start(sym_entry)
        hbox.pack_start(gtk.Label("Trigger text"), False)
        trigger_entry = gtk.Entry()
        hbox.pack_start(trigger_entry)
        GlypherWidgetBox.__init__(self, hbox, widget_parent, caret=caret, box_colour=box_colour)
        sym_entry.grab_focus()
        trigger_entry.connect('activate', self.do_trigger_entry_activate)
        # Escape in either entry dismisses the box.
        trigger_entry.connect('key-press-event', \
            lambda w, e : self.destroy() if gtk.gdk.keyval_name(e.keyval) == 'Escape' else 0)
        sym_entry.connect('key-press-event', \
            lambda w, e : self.destroy() if gtk.gdk.keyval_name(e.keyval) == 'Escape' else 0)

    def do_trigger_entry_activate(self, entry) :
        ue = unicode(entry.get_text())
        # NOTE(review): this condition can only ever delete the mapping
        # for an empty trigger string; 'ue != ""' (or a check on the
        # symbol entry) may have been intended -- confirm.
        if ue == '' and ue in g.combinations :
            del g.combinations[ue]
        else :
            g.combinations[ue] = unicode(self.sym_entry.get_text())
        # Insert the trigger as a word at the caret and close the form.
        l = make_word(ue, self.caret.phrased_to)
        self.caret.insert_entity(l)
        self.destroy()
class GlypherEntry(GlypherWidgetBox) :
    """A boxed single-line text entry; on activation, its text is
    submitted (by default inserted at the caret as a word).
    """
    entry = None
    gw = None
    caret = None

    def __init__(self, widget_parent, caret, box_colour = (0.9, 0.8, 0.6)) :
        self.entry = gtk.Entry()
        GlypherWidgetBox.__init__(self, self.entry, widget_parent, caret=caret, box_colour=box_colour)
        self.mes.append('TeX_entry')
        self.caret = caret
        # Text colour used to signal a rejected submission.
        self.wrong_colour = (1.0, 0.5, 0.5)
        self.entry.connect('activate', self.do_entry_activate)
        # Escape dismisses the entry.
        self.entry.connect('key-press-event', \
            lambda w, e : self.destroy() if gtk.gdk.keyval_name(e.keyval) == 'Escape' else 0)

    def submit(self) :
        """Insert the entry's text at the caret; return True on success."""
        t = self.entry.get_text()
        debug_print(t)
        l = make_word(t, self.caret.phrased_to)
        self.caret.insert_entity(l)
        return True

    def do_entry_activate(self, entry) :
        if self.submit() : self.destroy()
        else :
            # Submission failed: turn the text red and stay open.
            self.entry.modify_text(gtk.STATE_NORMAL, gtk.gdk.Color(*self.wrong_colour))
class GlypherTeXEntry(GlypherEntry) :
    """An entry that interprets its text as LaTeX, inserting the
    matching named entity or, failing that, the matching shape.
    """

    def __init__(self, widget_parent, caret) :
        GlypherEntry.__init__(self, widget_parent, caret, box_colour = (0.9, 0.5, 0.3))

    def submit(self) :
        # Try a known named entity first...
        t = Parser.get_name_from_latex(self.entry.get_text())
        if t is not None :
            try :
                debug_print(t)
                self.caret.insert_named(t)
                return True
            except RuntimeError :
                debug_print(Parser.latex_to_name)
                return False
        # ...then fall back to a raw shape lookup.
        t = Parser.get_shape_from_latex(self.entry.get_text())
        if t is not None :
            self.caret.insert_shape(t)
            return True
        return False
class GlypherWidget(GlypherEntity) :
    """Embeds a GTK widget inside the glypher canvas via an EventBox
    whose on-screen position is kept in sync with the owning
    GlypherBox on every draw.
    """
    widget = None
    ebox = None

    def __init__(self, parent, widget, widget_parent, box, global_offset) :
        GlypherEntity.__init__(self, parent)
        self.add_properties({'local_space' : True})
        self.widget = widget
        self.box = box
        self.offset = global_offset
        #widget.grab_focus()
        self.ebox = gtk.EventBox()
        #widget.modify_bg(0, gtk.gdk.Color(1,1,1))
        e = self.ebox
        #e.set_size_request(100, 50)
        e.set_events(gtk.gdk.ALL_EVENTS_MASK)
        e.connect("button-press-event", lambda w, e : debug_print(e))
        #sc = e.get_screen()
        #e.set_colormap(sc.get_rgba_colormap())
        #e.set_app_paintable(True)
        e.add(widget)
        widget_parent.put(e, 0, 0)
        #e.window.set_composited(True)
        al = e.get_size_request()
        debug_print(al)
        # Use the widget's requested size as this entity's reference size.
        m = e.size_request()
        self.ref_width = m[0]  #al.height
        self.ref_height = m[1]  #al.width
        self.recalc_bbox()
        r = self._get_rect(None)
        #e.window.move(self.config[0].bbox[0], self.config[0].bbox[1])
        e.size_allocate(r)
        self.first_move = False
        widget_parent.move(e, 0, 0)
        debug_print(widget.get_allocation())
        #debug_print(widget.window.get_geometry())
        #debug_print(widget.window.get_frame_extents())
        debug_print(self.config[0].bbox)
        # Hidden until first painted; draw() makes it visible.
        e.set_visible(False)
        #widget.grab_focus()

    def _get_rect(self, cr) :
        # Compute the widget's allocation rectangle in device pixels.
        # With a cairo context, convert the box's user-space anchor to
        # device space; without one, fall back to (0, 0).
        #x, y = self.get_local_offset()
        x, y = (0,0)
        #x = self.offset[0]
        #y = self.offset[1]
        #y += self.ref_height
        if cr is not None :
            self.box.cast()
            x, y = self.box.get_local_offset()
            x += self.box.config[0].bbox[0]
            y += self.box.config[0].bbox[1]
            x, y = cr.user_to_device(x, y)
        w, h = (self.ref_width, self.ref_height)
        if cr is not None :
            w, h = cr.user_to_device_distance(w, h)
        #y -= self.ref_height
        #return gtk.gdk.Rectangle(int(x), int(y-w), int(h), int(w))
        return gtk.gdk.Rectangle(int(x), int(y), int(w), int(h))

    def _move_ebox(self, cr=None) :
        # Re-measure the widget and move the event box when its
        # on-screen allocation no longer matches the computed rectangle.
        e = self.ebox
        a = e.get_allocation()
        e.show_all()
        m = e.size_request()
        if cr is not None :
            m = cr.device_to_user_distance(*m)
        self.ref_width = m[0]  #al.height
        self.ref_height = m[1]  #al.width
        r = self._rect
        debug_print(r)
        r1 = self._get_rect(cr)
        if cr is not None :
            debug_print(r1)
            debug_print(self.box.anchor)
            debug_print_stack()
        if e.allocation.x != r1.x or e.allocation.y != r1.y :
            e.get_parent().move(e, r1.x, r1.y)
        e.show_all()
        self.recalc_bbox()

    x = None
    y = None
    _rect = None

    def draw(self, cr) :
        # Keep the GTK widget glued to the box each time we paint.
        #if self._rect != self._get_rect(cr) :
        self._rect = self._get_rect(cr)
        self._move_ebox(cr)
        self.ebox.set_visible(True)
        #cr.save()
        #e = self.ebox
        #a = e.get_allocation()
        #if a.x != int(self.config[0].bbox[0]) or \
        #   a.y != int(self.config[0].bbox[1]) :

    #def process_key(self, name, event, caret) :
    #    if not self.widget.has_focus() : return
    #    return self.ebox.event(event)

    def process_button_release(self, event) :
        # Event intentionally not consumed here (returns None).
        #self.widget.grab_focus()
        #return self.widget.has_focus()
        #self.widget.do_button_release_event(self.widget, event)
        #return True if self.widget.event(event) else None
        return None

    def process_button_press(self, event) :
        # Event intentionally not consumed here (returns None).
        #self.widget.grab_focus()
        #return self.widget.has_focus()
        #self.widget.do_button_press_event(self.widget, event)
        #return True if self.widget.event(event) else None
        return None

    def process_scroll(self, event) :
        # Scrolling hands keyboard focus to the embedded widget but is
        # not consumed.
        self.widget.grab_focus()
        return None
        #return True if self.widget.event(event) else None
class GlypherAltBox(GlypherBox) :
    """The pop-up box listing the alternatives of an alternatives
    phrase, one per row, with an arrow marking the active one.
    """
    alts = None
    alts_syms = None
    alts_phrase = None
    anchor = (0,0)

    def __init__(self, alts) :
        self.alts = alts
        self.alts_phrase = GlypherPhrase(None)
        GlypherBox.__init__(self, self.alts_phrase)
        self.alts_phrase.mes.append('altbox_phrase')
        self.cast()

    def cast(self) :
        """Rebuild the rows of the box from self.alts."""
        n = 0
        self.alts_syms = {}
        self.alts_phrase.empty()
        # Sanity check: no alternative may already be parented elsewhere.
        for alt in self.alts :
            if isinstance(alt, GlypherEntity) and alt.included() : raise(RuntimeError, alt.format_me())
        for alt in self.alts :
            if isinstance(alt, GlypherEntity) :
                ns = alt
            else :
                # Non-entity alternatives are rendered via their str().
                ns = GlypherSymbol(None, str(alt), ink=True)
            self.alts_syms[alt] = ns
            self.alts_phrase.append(ns, row=n)
            self.alts_phrase.set_row_align(n, 'c')
            ns.set_padding_all(4)
            # Successive alternatives go on rows 0, -1, -2, ...
            n -= 1
        self.anchor = (0,0) # (self.alts_phrase.config[0].bbox[0]-20, self.alts_phrase.config[0].bbox[1])
        self.translate(-self.config[0].bbox[0], -self.config[0].bbox[1])

    def draw(self, cr, anchor, size, rgb_colour, active=None) :
        # NOTE: intentionally widens GlypherBox.draw's signature; the
        # only caller (draw_alternatives) always passes the extras.
        if anchor != self.anchor :
            self.anchor = (0,0)
        if size != self.alts_phrase.get_font_size() :
            self.set_font_size(size)
        if rgb_colour != self.alts_phrase.get_rgb_colour() :
            self.set_rgb_colour(rgb_colour)
        GlypherBox.draw(self, cr)
        if active and active in self.alts_syms :
            cr.save()
            # NOTE(review): bbp and bbs are fetched from the same bbox;
            # one of them was probably meant to come from a different
            # config -- confirm.
            bbp = self.alts_syms[active].config[0].get_bbox()
            bbs = self.alts_syms[active].config[0].get_bbox()
            cr.set_source_rgba(0.9, 0.8, 0.6)
            mp = 0.5*(bbs[1]+bbs[3])
            # Small right-pointing triangle to the left of the active row.
            cr.move_to(bbp[0] - 16, mp-4)
            cr.line_to(bbp[0] - 10, mp)
            cr.line_to(bbp[0] - 16, mp+4)
            cr.close_path()
            cr.fill_preserve()
            cr.set_line_width(2)
            cr.set_source_rgb(0.8,0.6,0.2)
            cr.stroke()
            cr.restore()
class GlypherAlternativesPhrase(GlypherPhrase) :
    """A phrase offering several interchangeable child entities, of
    which exactly one (active_child) is visible at a time.

    The alternatives are mirrored (as deep copies) into a
    GlypherAltBox so they can be rendered as a pop-up list.
    """
    active_child = None

    def __init__(self, parent, area = (0,0,0,0), line_size_coeff = 1.0, font_size_coeff = 1.0, align = ('l','m'), auto_fices = False) :
        GlypherPhrase.__init__(self, parent, area, line_size_coeff, font_size_coeff, align, auto_fices)
        self.mes.append('alts_phrase')
        self.set_enterable(False)
        self.set_attachable(True)
        self.set_have_alternatives(True)
        self.altbox = GlypherAltBox([])
        #self.characteristics.append('_in_phrase')

    def decorate(self, cr) :
        # Draw the small green "alternatives available" triangle, but
        # only when an ancestor phrasegroup is highlighted appropriately.
        hl_anc = None
        # If this is in an unhighlighted highlight group, don't show it,
        # otherwise if the first highlighted group is above it, show it
        for anc in self.get_ancestors() :
            if anc.am('phrasegroup') :
                if anc.first_highlighted_pg_over_active : hl_anc = anc; break
                #else : hl_anc = None; break
            elif anc.highlight_group : hl_anc = None; break
        if not hl_anc : return
        cr.save()
        bb = self.config[0].get_bbox()
        cr.move_to(bb[2]-2, bb[1]-3)
        cr.line_to(bb[2]+3, bb[1]-3)
        cr.line_to(bb[2]+3, bb[1]+2)
        cr.close_path()
        cr.set_source_rgba(0.0, 1.0, 0.0, 0.5)
        cr.fill_preserve()
        cr.set_source_rgba(0.0, 0.5, 0.0, 1.0)
        cr.stroke()
        cr.restore()

    def child_change(self) :
        GlypherPhrase.child_change(self)
        self.cast()
        # Hide everything except the active child.
        for child in self.entities :
            if child != self.active_child and child.get_visible() :
                child.hide()
        if len(self.entities)>0 :
            if not self.active_child :
                self.active_child = self.entities[0]
                self.active_child.show()
            elif not self.active_child.get_visible() :
                self.active_child.show()

    def cast(self) :
        # Rebuild the alt box from deep copies so the originals never
        # end up parented inside the pop-up.
        self.altbox.alts = copy.deepcopy(self.entities)
        alist = list(self.altbox.alts)
        for alt in alist :
            alt.set_parent(None)
            if alt.included() : raise(RuntimeError, str(alt.format_me()))
        self.altbox.cast()

    def next_alternative(self) :
        """Make the next alternative (cyclically) the active child."""
        alts = self.entities
        if self.active_child is None : return
        ind = alts.index(self.active_child)
        # BUG FIX: this stepped backwards (ind - 1), identically to
        # prev_alternative; it now advances.
        self.active_child = alts[(ind + 1)%len(alts)]
        self.child_change()

    def prev_alternative(self) :
        """Make the previous alternative (cyclically) the active child."""
        alts = self.entities
        if self.active_child is None : return
        ind = alts.index(self.active_child)
        self.active_child = alts[(len(alts) + ind - 1)%len(alts)]
        self.child_change()

    def draw_alternatives(self, cr) :
        if not self.get_visible() : return
        altbox = self.altbox
        altbox.draw(cr, anchor=(self.config[0].bbox[2], self.config[0].bbox[1]),\
            size=self.get_scaled_font_size(), rgb_colour=self.get_rgb_colour(), active=self.active_child)
        self.draw(cr)

    def set_alternative(self, child) :
        """Activate child, if it is one of our alternatives."""
        if child not in self.entities : return
        self.active_child = child
        self.child_change()

    def set_alternative_by_name(self, name) :
        """Activate the first alternative whose name matches."""
        named = filter(lambda e : e.get_name() == name, self.entities)
        if len(named) == 0 : return
        self.set_alternative(named[0])
# Cached template instance used by make_alts_phrase (built lazily on first use).
ref_alts_phrase = None
def make_alts_phrase () :
    """Return a fresh GlypherAlternativesPhrase.

    A reference instance is constructed once and cached; every call hands out
    an independent deep copy of it.
    """
    global ref_alts_phrase
    if ref_alts_phrase is None :
        ref_alts_phrase = GlypherAlternativesPhrase(None)
    # BUG FIX: removed stray trailing text ("| PypiClean") that made this line
    # a syntax error.
    return copy.deepcopy(ref_alts_phrase)
/6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/04-playlist-plus/README.md | # Episode 04-playlist-plus: The Final Showdown.
--
**Now that we've improved our genre based playlist generator, expose a 'hint'
parameter allowing the user to suggest a better bonus movie. The bonus movie is
chosen as the movie that has the most similar name to the 'hint'.
This is achieved by importing a string edit distance package using Metaflow's
conda based dependency management feature. Dependency management builds
isolated and reproducible environments for individual steps.**
--
#### Showcasing:
- Metaflow's conda based dependency management.
#### Before playing this episode:
This tutorial requires the 'conda' package manager to be installed with the
conda-forge channel added.
1. Download Miniconda at 'https://docs.conda.io/en/latest/miniconda.html'
2. ```conda config --add channels conda-forge```
#### To play this episode:
1. ```cd metaflow-tutorials```
2. ```python 04-playlist-plus/playlist.py --environment=conda show```
3. ```python 04-playlist-plus/playlist.py --environment=conda run```
4. ```python 04-playlist-plus/playlist.py --environment=conda run --hint "Data Science Strikes Back"``` | PypiClean |
/EOmaps-7.0-py3-none-any.whl/eomaps/cb_container.py |
import logging
from types import SimpleNamespace
from functools import update_wrapper, partial, wraps
from itertools import chain
from .callbacks import (
ClickCallbacks,
PickCallbacks,
KeypressCallbacks,
MoveCallbacks,
)
from .helpers import register_modules
import matplotlib.pyplot as plt
from pyproj import Transformer
import numpy as np
# Module-level logger; all EOmaps callback messages below go through this.
_log = logging.getLogger(__name__)
class GeoDataFramePicker:
    """Collection of pick-methods for geopandas.GeoDataFrames"""

    def __init__(self, gdf, val_key, pick_method):
        # The GeoDataFrame to pick from, the column providing pick-values and
        # the strategy ("contains" or "centroids") used to identify a geometry.
        self.gdf = gdf
        self.val_key = val_key
        self.pick_method = pick_method

    def get_picker(self):
        """Return the picker-function corresponding to ``self.pick_method``."""
        (gpd,) = register_modules("geopandas")

        if self.pick_method == "contains":
            return self._contains_picker

        if self.pick_method == "centroids":
            from scipy.spatial import cKDTree

            # Build a KD-tree over the geometry centroids for fast
            # nearest-neighbour queries.
            self.tree = cKDTree([(c.x, c.y) for c in self.gdf.geometry.centroid])
            return self._centroids_picker

        raise TypeError(f"EOmaps: {self.pick_method} is not a valid pick_method!")

    def _contains_picker(self, artist, mouseevent):
        """Pick the first geometry that contains the clicked point.

        Returns a ``(hit, props)`` tuple as expected by matplotlib's picker
        protocol; any failure is treated as "no hit".
        """
        (gpd,) = register_modules("geopandas")
        try:
            click_pt = gpd.points_from_xy(
                np.atleast_1d(mouseevent.xdata),
                np.atleast_1d(mouseevent.ydata),
            )[0]
            hits = self.gdf.contains(click_pt)

            if not hits.any():
                return False, dict()

            picked_id = self.gdf.index[hits][0]
            picked_ind = hits.values.nonzero()[0][0]

            picked_val = self.gdf[hits][self.val_key].iloc[0] if self.val_key else None

            arr = artist.get_array()
            if arr is not None:
                picked_color = artist.cmap(artist.norm(arr[picked_ind]))
            else:
                picked_color = None

            return True, dict(
                ID=picked_id,
                ind=picked_ind,
                val=picked_val,
                val_color=picked_color,
                pos=(mouseevent.xdata, mouseevent.ydata),
            )
        except Exception:
            # best-effort: any problem during the query counts as "no hit"
            return False, dict()

    def _centroids_picker(self, artist, mouseevent):
        """Pick the geometry whose centroid is closest to the clicked point."""
        try:
            _, picked_ind = self.tree.query((mouseevent.xdata, mouseevent.ydata), 1)
            picked_id = self.gdf.index[picked_ind]

            if self.val_key is not None:
                picked_val = self.gdf.iloc[picked_ind][self.val_key]
            else:
                picked_val = None

            picked_pos = self.tree.data[picked_ind].tolist()

            try:
                # NOTE(review): the colour lookup indexes the array with the
                # index-label (ID), not the positional index — confirm intended.
                picked_color = artist.cmap(artist.norm(artist.get_array()[picked_id]))
            except Exception:
                picked_color = None

            return True, dict(
                ID=picked_id,
                pos=picked_pos,
                val=picked_val,
                ind=picked_ind,
                val_color=picked_color,
            )
        except Exception:
            return False, dict()
class _CallbackContainer(object):
    """Base-class for callback containers."""

    def __init__(self, m, cb_class=None, method="click", parent_container=None):
        # m : the Maps object this container is attached to
        # cb_class : the callback-collection class (e.g. ClickCallbacks)
        # method : the event-category name ("click", "pick", "move", "keypress")
        # parent_container : if given, temporary artists are shared with it
        self._m = m
        self._parent_container = parent_container
        if self._parent_container is None:
            self._temporary_artists = []
        else:
            # share the temporary-artist list with the parent container
            self._temporary_artists = self._parent_container._temporary_artists
        self._cb = cb_class(m, self._temporary_artists)
        self._cb_list = cb_class._cb_list
        self.attach = self._attach(self)
        self.get = self._get(self)
        # forwarded Maps-objects, keyed by id(m) (see forward_events)
        self._fwd_cbs = dict()
        self._method = method
        self._event = None
        self._execute_on_all_layers = False

    def _getobj(self, m):
        """Get the equivalent callback container on another maps object."""
        return getattr(m.cb, self._method, None)

    @property
    def _objs(self):
        """Get the callback-container objects associated with the event-axes."""
        # Note: it is possible that more than 1 Maps objects are
        # assigned to the same axis!
        objs = []
        if self._event is not None:
            # pick-events wrap the underlying MouseEvent
            if hasattr(self._event, "mouseevent"):
                event = self._event.mouseevent
            else:
                event = self._event
            # make sure that "all" layer callbacks are executed before other callbacks
            ms, malls = [], []
            for m in reversed((*self._m.parent._children, self._m.parent)):
                if m.layer == "all":
                    malls.append(m)
                else:
                    ms.append(m)
            ms = ms + malls
            if self._method in ["keypress"]:
                for m in ms:
                    # always execute keypress callbacks irrespective of the mouse-pos
                    obj = self._getobj(m)
                    # only include objects that are on the same layer
                    if obj is not None and self._execute_cb(obj._m.layer):
                        objs.append(obj)
            else:
                for m in ms:
                    # don't use "is" in here since Maps-children are proxies
                    # (and so are their attributes)!
                    if event.inaxes == m.ax:
                        obj = self._getobj(m)
                        # only include objects that are on the same layer
                        if obj is not None and self._execute_cb(obj._m.layer):
                            objs.append(obj)
        return objs

    def _clear_temporary_artists(self):
        """Schedule all temporary artists of this container for removal."""
        while len(self._temporary_artists) > 0:
            art = self._temporary_artists.pop(-1)
            self._m.BM._artists_to_clear.setdefault(self._method, []).append(art)

    def _sort_cbs(self, cbs):
        """Sort callback-keys in the order given by the pre-defined cb-list."""
        if not cbs:
            return set()
        # strip the "__<layer>" suffix and the "_<idx>" counter to get base names
        cbnames = set([i.rsplit("__", 1)[0].rsplit("_", 1)[0] for i in cbs])
        sortp = self._cb_list + list(set(self._cb_list) ^ cbnames)
        return sorted(
            list(cbs), key=lambda w: sortp.index(w.rsplit("__", 1)[0].rsplit("_", 1)[0])
        )

    def __repr__(self):
        txt = "Attached callbacks:\n    " + "\n    ".join(
            f"{key}" for key in self.get.attached_callbacks
        )
        return txt

    def forward_events(self, *args):
        """
        Forward callback-events from this Maps-object to other Maps-objects.

        (e.g. share events one-way)

        Parameters
        ----------
        args : eomaps.Maps
            The Maps-objects that should execute the callback.
        """
        for m in args:
            self._fwd_cbs[id(m)] = m

    def share_events(self, *args):
        """
        Share callback-events between this Maps-object and all other Maps-objects.

        (e.g. share events both ways)

        Parameters
        ----------
        args : eomaps.Maps
            The Maps-objects that should execute the callback.
        """
        # register every object as a forward-target of every other object
        for m1 in (self._m, *args):
            for m2 in (self._m, *args):
                if m1 is not m2:
                    self._getobj(m1)._fwd_cbs[id(m2)] = m2
        # "click" events also share the associated motion-callbacks
        if self._method == "click":
            self._m.cb._click_move.share_events(*args)

    def add_temporary_artist(self, artist, layer=None):
        """
        Make an artist temporary (remove it from the map at the next event).

        Parameters
        ----------
        artist : matplotlib.artist
            The artist to use
        layer : str or None, optional
            The layer to put the artist on.
            If None, the layer of the used Maps-object is used. (e.g. `m.layer`)
        """
        if layer is None:
            layer = self._m.layer
        # in case the artist has already been added as normal or background
        # artist, remove it first!
        if artist in chain(*self._m.BM._bg_artists.values()):
            self._m.BM.remove_bg_artist(artist)
        if artist in chain(*self._m.BM._artists.values()):
            self._m.BM.remove_artist(artist)
        self._m.BM.add_artist(artist, layer=layer)
        self._temporary_artists.append(artist)

    def _execute_cb(self, layer):
        """
        Get bool if a callback assigned on "layer" should be executed.

        - True if the callback is assigned to the "all" layer
        - True if the corresponding layer is currently active
        - True if the corresponding layer is part of a currently active "multi-layer"
          (e.g. "layer|layer2" or "layer|layer2{0.5}" )

        Parameters
        ----------
        layer : str
            The name of the layer to which the callback is attached.

        Returns
        -------
        bool
            Indicator if the callback should be executed on the currently visible
            layer or not.
        """
        if self.execute_on_all_layers:
            return True
        visible_layer = self._m.BM.bg_layer
        if layer == "all":
            # the all layer is always executed
            return True
        elif "|" in visible_layer:
            if layer == visible_layer:
                # return true for the multi-layer itself
                return True
            else:
                # return true for layers that are part of the multi-layer
                # (make sure to strip off transparency assignments, e.g. "layer{}" )
                return any(
                    i.strip().split("{")[0] == layer for i in visible_layer.split("|")
                )
        else:
            return layer == visible_layer

    @property
    def execute_on_all_layers(self):
        """Bool flag, inherited from the parent container if one is set."""
        if self._parent_container is not None:
            return self._parent_container._execute_on_all_layers
        return self._execute_on_all_layers

    def set_execute_on_all_layers(self, q):
        """
        If True, callbacks of this container are executed even if the associated
        layer is not visible.

        (By default, callbacks are only executed if the associated layer is visible!)

        Parameters
        ----------
        q : bool
            True if callbacks should be executed irrespective of the visible layer.
        """
        if q:
            _log.debug(
                f"EOmaps: {self._method} callbacks of the Maps-object {self._m} "
                "are executed on all layers!"
            )
        # containers with a parent always inherit the parent's setting
        if self._parent_container is not None:
            raise TypeError(
                f"EOmaps: 'execute_on_all_layers' is inherited for {self._method}!"
            )
        self._execute_on_all_layers = q
class _ClickContainer(_CallbackContainer):
    """
    A container for attaching callbacks and accessing return-objects.

    attach : accessor for callbacks.
        Executing the functions will attach the associated callback to the map!
    get : accessor for return-objects
        A container to provide easy-access to the return-values of the callbacks.
    """

    def __init__(self, m, cb_cls=None, method="pick", default_button=1, **kwargs):
        super().__init__(m, cb_cls, method, **kwargs)
        # a dict to identify connected _move callbacks
        # (e.g. to remove "_move" and "click" cbs in one go)
        self._connected_move_cbs = dict()
        # keyboard keys treated as "sticky" modifiers (see set_sticky_modifiers)
        self._sticky_modifiers = []
        # the default button to use when attaching callbacks
        self._default_button = default_button

    class _attach:
        """
        Attach custom or pre-defined callbacks to the map.

        Each callback-function takes 2 additional keyword-arguments:

        double_click : bool
            Indicator if the callback should be executed on double-click (True)
            or on single-click events (False). The default is False
        button : int
            The mouse-button to use for executing the callback:
            - LEFT = 1
            - MIDDLE = 2
            - RIGHT = 3
            - BACK = 8
            - FORWARD = 9
            The default is None in which case 1 (e.g. LEFT is used)
        modifier : str or None
            Define a keypress-modifier to execute the callback only if the
            corresponding key is pressed on the keyboard.
            - If None, the callback is executed if no modifier is activated.
            The default is None.
        on_motion : bool
            !! Only relevant for "click" callbacks !!
            - True: Continuously execute the callback if the mouse is moved while the
              assigned button is pressed.
            - False: Only execute the callback on clicks.
            The default is True.

        For additional keyword-arguments check the doc of the callback-functions!

        Examples
        --------
        Get a (temporary) annotation on a LEFT-double-click:

        >>> m.cb.click.attach.annotate(double_click=True, button=1, permanent=False)

        Permanently color LEFT-clicked pixels red with a black border:

        >>> m.cb.pick.attach.mark(facecolor="r", edgecolor="k", permanent=True)

        Attach a customly defined callback

        >>> def some_callback(self, asdf, **kwargs):
        >>>     print("hello world")
        >>>     print("the position of the clicked pixel", kwargs["pos"])
        >>>     print("the data-index of the clicked pixel", kwargs["ID"])
        >>>     print("data-value of the clicked pixel", kwargs["val"])
        >>>     print("the plot-crs is:", self.crs_plot)

        >>> m.cb.pick.attach(some_callback, double_click=False, button=1, asdf=1)
        """

        def __init__(self, parent):
            self._parent = parent
            # attach pre-defined callbacks
            for cb in self._parent._cb_list:
                setattr(
                    self,
                    cb,
                    update_wrapper(
                        partial(self._parent._add_callback, callback=cb),
                        getattr(self._parent._cb, cb),
                    ),
                )

        def __call__(self, f, double_click=False, button=None, modifier=None, **kwargs):
            """
            Add a custom callback-function to the map.

            Parameters
            ----------
            f : callable
                the function to attach to the map.
                The call-signature is:

                >>> def some_callback(asdf, **kwargs):
                >>>     print("hello world")
                >>>     print("the position of the clicked pixel", kwargs["pos"])
                >>>     print("the data-index of the clicked pixel", kwargs["ID"])
                >>>     print("data-value of the clicked pixel", kwargs["val"])
                >>>
                >>> m.cb.attach(some_callback, asdf=1)
            double_click : bool
                Indicator if the callback should be executed on double-click (True)
                or on single-click events (False)
            button : int
                The mouse-button to use for executing the callback:
                - LEFT = 1
                - MIDDLE = 2
                - RIGHT = 3
                - BACK = 8
                - FORWARD = 9
                The default is None in which case 1 (e.g. the LEFT button) is used
            modifier : str or None
                Define a keypress-modifier to execute the callback only if the
                corresponding key is pressed on the keyboard.
                - If None, the callback is executed if no modifier is activated.
                The default is None.
            on_motion : bool
                !! Only relevant for "click" callbacks !!
                - True: Continuously execute the callback if the mouse is moved
                  while the assigned button is pressed.
                - False: Only execute the callback on clicks.
                The default is True.
            kwargs :
                kwargs passed to the callback-function
                For documentation of the individual functions check the docs in `m.cb`

            Returns
            -------
            cid : int
                the ID of the attached callback
            """
            if button is None:
                button = self._parent._default_button
            return self._parent._add_callback(
                callback=f,
                double_click=double_click,
                button=button,
                modifier=modifier,
                **kwargs,
            )

    class _get:
        """Accessor for objects generated/retrieved by callbacks."""

        def __init__(self, parent):
            self.m = parent._m
            self.cb = parent._cb
            # nested dict of attached callbacks:
            # {single/double/release: {"<button>__<modifier>": {cbkey: partial}}}
            self.cbs = dict()

        @property
        def picked_object(self):
            # only available after the 'load' callback stored an object
            if hasattr(self.cb, "picked_object"):
                return self.cb.picked_object
            else:
                _log.warning(
                    "EOmaps: No picked objects found. Attach "
                    "the 'load' callback first!"
                )

        @property
        def picked_vals(self):
            # only available after the 'get_vals' callback stored values
            if hasattr(self.cb, "picked_vals"):
                return self.cb.picked_vals
            else:
                _log.warning(
                    "EOmaps: No picked values found. Attach "
                    "the 'get_vals' callback first!"
                )

        @property
        def permanent_markers(self):
            # only available after the 'mark' callback ran with permanent=True
            if hasattr(self.cb, "permanent_markers"):
                return self.cb.permanent_markers
            else:
                _log.warning(
                    "EOmaps: No permanent markers found. Attach "
                    "the 'mark' callback with 'permanent=True' first!"
                )

        @property
        def permanent_annotations(self):
            # only available after the 'annotate' callback ran with permanent=True
            if hasattr(self.cb, "permanent_annotations"):
                return self.cb.permanent_annotations
            else:
                _log.warning(
                    "EOmaps: No permanent annotations found. Attach "
                    "the 'annotate' callback with 'permanent=True' first!"
                )

        @property
        def attached_callbacks(self):
            """List of cid-strings of all attached callbacks."""
            cbs = []
            for ds, dsdict in self.cbs.items():
                for b, bdict in dsdict.items():
                    for name in bdict.keys():
                        cbs.append(f"{name}__{ds}__{b}")
            return cbs

    def _parse_cid(self, cid):
        """
        Parse a callback-id into its components.

        Parameters
        ----------
        cid : str
            The callback-id (e.g. the return-value of an `attach` call), built
            as "<name>__<layer>__<ds>__<button>__<modifier>".

        Returns
        -------
        name : str
            the callback name.
        layer : str
            the layer to which the callback is attached.
        ds : str
            indicator if double- or single-click is used.
        b : str
            the button (e.g. 1, 2, 3 for left, middle, right)
        m : str
            the keypress modifier.
        """
        # do this to allow double-underscores in the layer-name
        name, rest = cid.split("__", 1)
        layer, ds, b, m = rest.rsplit("__", 3)
        return name, layer, ds, b, m

    def remove(self, callback=None):
        """
        Remove previously attached callbacks from the map.

        Parameters
        ----------
        callback : str
            the name of the callback to remove
            (e.g. the return-value of `m.cb.<method>.attach.<callback>()`)
        """
        # remove motion callbacks connected to click-callbacks
        if self._method == "click":
            if callback in self._connected_move_cbs:
                for i in self._connected_move_cbs[callback]:
                    self._m.cb._click_move.remove(i)
                self._connected_move_cbs.pop(callback)
        if callback is not None:
            name, layer, ds, b, m = self._parse_cid(callback)
            cbname = name + "__" + layer
            bname = f"{b}__{m}"
            dsdict = self.get.cbs.get(ds, None)
            if dsdict is not None:
                if bname in dsdict:
                    bdict = dsdict.get(bname)
                else:
                    _log.error(f"EOmaps: There is no callback named {callback}")
                    return
            else:
                _log.error(f"EOmaps: There is no callback named {callback}")
                return
            if bdict is not None:
                if cbname in bdict:
                    del bdict[cbname]
                    # call cleanup methods on removal
                    fname = name.rsplit("_", 1)[0]
                    if hasattr(self._cb, f"_{fname}_cleanup"):
                        getattr(self._cb, f"_{fname}_cleanup")()
                else:
                    _log.error(f"EOmaps: There is no callback named {callback}")

    def set_sticky_modifiers(self, *args):
        """
        Define keys on the keyboard that should be treated as "sticky modifiers".

        "sticky modifiers" are used in "click"- "pick"- and "move" callbacks to define
        modifiers that should remain active even if the corresponding key on the
        keyboard is released.

        - a "sticky modifier" <KEY> will remain activated until
          - "ctrl + <KEY>" is pressed to deactivate the sticky modifier
          - another sticky modifier key is pressed on the keyboard

        Parameters
        ----------
        args : str
            Any positional argument passed to this function will be used as
            sticky-modifier, e.g.:

            >>> m.cb.click.set_sticky_modifiers("a", "1", "x")

        Examples
        --------
        >>> m = Maps()
        >>> m.cb.click.attach.annotate(modifier="1")
        >>> m.cb.click.set_sticky_modifiers("1")
        """
        self._sticky_modifiers = list(map(str, args))
        # "click" events share sticky-modifiers with the motion-container
        if self._method == "click":
            self._m.cb._click_move._sticky_modifiers = args

    def _init_picker(self):
        """Make the plotted dataset pickable (requires a plotted collection)."""
        assert (
            self._m.coll is not None
        ), "you can only attach pick-callbacks after calling `plot_map()`!"
        try:
            # Lazily make a plotted dataset pickable
            if getattr(self._m, "tree", None) is None:
                from .helpers import SearchTree
                self._m.tree = SearchTree(m=self._m._proxy(self._m))
            self._m.cb.pick._set_artist(self._m.coll)
            self._m.cb.pick._init_cbs()
            self._m.cb._methods.add("pick")
        except Exception as ex:
            _log.exception(
                "EOmaps: There was an error while trying to initialize "
                f"pick-callbacks!",
            )

    def _add_callback(
        self,
        *args,
        callback=None,
        double_click=False,
        button=None,
        modifier=None,
        **kwargs,
    ):
        """
        Attach a callback to the plot that will be executed if a pixel is clicked.

        A list of pre-defined callbacks (accessible via `m.cb`) or customly defined
        functions can be used.

        >>> # to add a pre-defined callback use:
        >>> cid = m._add_callback("annotate", <kwargs passed to m.cb.annotate>)
        >>> # to remove the callback again, call:
        >>> m.remove_callback(cid)

        Parameters
        ----------
        callback : callable or str
            The callback-function to attach.
            If a string is provided, it will be used to assign the associated function
            from the `m.cb` collection:
            - "annotate" : add annotations to the clicked pixel
            - "mark" : add markers to the clicked pixel
            - "plot" : dynamically update a plot with the clicked values
            - "print_to_console" : print info of the clicked pixel to the console
            - "get_values" : save properties of the clicked pixel to a dict
            - "load" : use the ID of the clicked pixel to load data
            - "clear_annotations" : clear all existing annotations
            - "clear_markers" : clear all existing markers

            You can also define a custom function with the following call-signature:

            >>> def some_callback(asdf, **kwargs):
            >>>     print("hello world")
            >>>     print("the position of the clicked pixel", kwargs["pos"])
            >>>     print("the data-index of the clicked pixel", kwargs["ID"])
            >>>     print("data-value of the clicked pixel", kwargs["val"])
            >>>     print("asdf is set to:", asdf)
            >>> m.cb.attach(some_callback, double_click=False, button=1, asdf=1)
        double_click : bool
            Indicator if the callback should be executed on double-click (True)
            or on single-click events (False)
        button : int
            The mouse-button to use for executing the callback:
            - LEFT = 1
            - MIDDLE = 2
            - RIGHT = 3
            - BACK = 8
            - FORWARD = 9
            The default is None in which case 1 (e.g. LEFT is used)
        modifier : str or None
            Define a keypress-modifier to execute the callback only if the
            corresponding key is pressed on the keyboard.
            - If None, the callback is executed if no modifier is activated.
            The default is None.
        on_motion : bool
            !! Only relevant for "click" callbacks !!
            - True: Continuously execute the callback if the mouse is moved while
              the assigned button is pressed.
            - False: Only execute the callback on clicks.
            The default is True.
        **kwargs :
            kwargs passed to the callback-function
            For documentation of the individual functions check the docs in `m.cb`

        Returns
        -------
        cbname : str
            the identification string of the callback
            (to remove the callback, use `m.cb.remove(cbname)`)
        """
        if button is None:
            button = self._default_button
        if self._method == "pick":
            if self._m.coll is None:
                # lazily initialize the picker when the layer is fetched
                self._m._data_manager._on_next_fetch.append(self._init_picker)
            else:
                self._init_picker()
        # attach "on_move" callbacks
        movecb_name = None
        # set on_motion True for "click" callbacks and False otherwise
        on_motion = kwargs.pop("on_motion", True if self._method == "click" else False)
        if self._method == "click" and on_motion is True:
            movecb_name = self._m.cb._click_move._add_callback(
                *args,
                callback=callback,
                double_click=double_click,
                button=button,
                modifier=modifier,
                **kwargs,
            )
        elif on_motion is True:
            _log.warning(
                "EOmaps: 'on_motion=True' is only possible for " "'click' callbacks!"
            )
        assert not all(
            i in kwargs for i in ["pos", "ID", "val", "double_click", "button"]
        ), 'the names "pos", "ID", "val" cannot be used as keyword-arguments!'
        if isinstance(callback, str):
            assert hasattr(self._cb, callback), (
                f"The function '{callback}' does not exist as a pre-defined callback."
                + " Use one of:\n    - "
                + "\n    - ".join(self._cb_list)
            )
            callback = getattr(self._cb, callback)
        # determine the event-category key ("double", "release" or "single")
        if double_click is True:
            btn_key = "double"
        elif double_click == "release":
            btn_key = "release"
        else:
            btn_key = "single"
        # check for modifiers
        button_modifier = f"{button}__{modifier}"
        d = self.get.cbs.setdefault(btn_key, dict()).setdefault(button_modifier, dict())
        # get a unique name for the callback
        # name_idx__layer
        ncb = [
            int(i.split("__")[0].rsplit("_", 1)[1])
            for i in d
            if i.startswith(callback.__name__)
        ]
        cbkey = (
            callback.__name__
            + f"_{max(ncb) + 1 if len(ncb) > 0 else 0}"
            + f"__{self._m.layer}"
        )
        d[cbkey] = partial(callback, *args, **kwargs)
        # add mouse-button assignment as suffix to the name (with __ separator)
        cbname = cbkey + f"__{btn_key}__{button}__{modifier}"  # TODO
        if movecb_name is not None:
            self._connected_move_cbs[cbname] = [movecb_name]
        return cbname
class ClickContainer(_ClickContainer):
    """
    Callbacks that are executed if you click anywhere on the Map.

    NOTE
    ----
    You can use `on_motion=False` when attaching a callback to avoid triggering
    the callback if the mouse is moved while a button is pressed.

    Methods
    -------
    attach : accessor for callbacks.
        Executing the functions will attach the associated callback to the map!
    get : accessor for return-objects
        A container to provide easy-access to the return-values of the callbacks.
    remove : remove prviously added callbacks from the map
    forward_events : forward events to connected maps-objects
    share_events : share events between connected maps-objects (e.g. forward both ways)
    set_sticky_modifiers : define keypress-modifiers that remain active after release
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # matplotlib connection-ids of the attached canvas-events
        self._cid_button_press_event = None
        self._cid_button_release_event = None
        self._cid_motion_event = None
        self._event = None

    def _init_cbs(self):
        """Connect the canvas-events (only once, on the parent Maps-object)."""
        if self._m.parent is self._m:
            self._add_click_callback()

    def _get_clickdict(self, event):
        """Build the kwargs-dict passed to click-callbacks from a MouseEvent."""
        clickdict = dict(
            pos=(event.xdata, event.ydata),
            ID=None,
            val=None,
            ind=None,
        )
        return clickdict

    def _execute_cbs(self, event, cids):
        """
        Execute a list of callbacks based on an event and the cid

        Parameters
        ----------
        event :
            The event to use.
        cids : list of str
            A list of the cids of the callbacks that should be executed.
        """
        clickdict = self._get_clickdict(event)
        for cid in cids:
            name, layer, ds, button, mod = self._parse_cid(cid)
            cbs = self.get.cbs.get(ds, dict()).get(f"{button}__{mod}", dict())
            cb = cbs.get(f"{name}__{layer}", None)
            if cb is not None:
                cb(**clickdict)

    def _onclick(self, event):
        """Execute all matching single/double-click callbacks for *event*."""
        clickdict = self._get_clickdict(event)
        if event.dblclick:
            cbs = self.get.cbs.get("double", dict())
        else:
            cbs = self.get.cbs.get("single", dict())
        # check for keypress-modifiers
        if (
            event.key is None
            and self._m.cb.keypress._modifier in self._sticky_modifiers
        ):
            # in case sticky_modifiers are defined, use the last pressed modifier
            event_key = self._m.cb.keypress._modifier
        else:
            event_key = event.key
        button_modifier = f"{event.button}__{event_key}"
        self._event = event
        if button_modifier in cbs:
            bcbs = cbs[button_modifier]
            for key in self._sort_cbs(bcbs):
                layer = key.split("__", 1)[1]
                # NOTE(review): a non-executable layer aborts the whole loop
                # (return, not continue) — kept as-is; confirm intended.
                if not self._execute_cb(layer):
                    return
                cb = bcbs[key]
                if clickdict is not None:
                    cb(**clickdict)

    def _onrelease(self, event):
        """Execute all matching button-release callbacks for *event*."""
        cbs = self.get.cbs.get("release", dict())
        # check for keypress-modifiers
        if (
            event.key is None
            and self._m.cb.keypress._modifier in self._sticky_modifiers
        ):
            # in case sticky_modifiers are defined, use the last pressed modifier
            event_key = self._m.cb.keypress._modifier
        else:
            event_key = event.key
        button_modifier = f"{event.button}__{event_key}"
        if button_modifier in cbs:
            clickdict = self._get_clickdict(event)
            bcbs = cbs[button_modifier]
            for cb in bcbs.values():
                cb(**clickdict)

    def _reset_cids(self):
        """Disconnect all canvas-events and clear temporary artists."""
        # clear all temporary artists
        self._clear_temporary_artists()
        self._m.BM._clear_temp_artists(self._method)
        if self._cid_button_press_event:
            self._m.f.canvas.mpl_disconnect(self._cid_button_press_event)
            self._cid_button_press_event = None
        if self._cid_motion_event:
            self._m.f.canvas.mpl_disconnect(self._cid_motion_event)
            self._cid_motion_event = None
        if self._cid_button_release_event:
            self._m.f.canvas.mpl_disconnect(self._cid_button_release_event)
            self._cid_button_release_event = None

    def _add_click_callback(self):
        """Connect the button-press and button-release canvas-events."""

        def clickcb(event):
            if not self._m.cb.get_execute_callbacks():
                return
            try:
                self._event = event
                # don't execute callbacks if a toolbar-action is active
                if (
                    self._m.f.canvas.toolbar is not None
                ) and self._m.f.canvas.toolbar.mode != "":
                    return
                # execute onclick on the maps object that belongs to the clicked axis
                # and forward the event to all forwarded maps-objects
                for obj in self._objs:
                    # clear temporary artists before executing new callbacks to avoid
                    # having old artists around when callbacks are triggered again
                    obj._clear_temporary_artists()
                    obj._onclick(event)
                    # forward callbacks to the connected maps-objects
                    obj._fwd_cb(event)
                self._m.BM._clear_temp_artists(self._method)
                self._m.parent.BM.update(clear=self._method)
            except ReferenceError:
                # ignore errors caused by no-longer existing weakrefs
                pass

        def releasecb(event):
            if not self._m.cb.get_execute_callbacks():
                return
            try:
                self._event = event
                # don't execute callbacks if a toolbar-action is active
                if (
                    self._m.f.canvas.toolbar is not None
                ) and self._m.f.canvas.toolbar.mode != "":
                    return
                # execute onclick on the maps object that belongs to the clicked axis
                # and forward the event to all forwarded maps-objects
                for obj in self._objs:
                    # don't clear temporary artists in here since we want
                    # click (or click+move) artists to remain on the plot when the
                    # button is released!
                    obj._onrelease(event)
                    # forward callbacks to the connected maps-objects
                    obj._fwd_cb(event)
            except ReferenceError:
                # ignore errors caused by no-longer existing weakrefs
                pass

        if self._cid_button_press_event is None:
            # ------------- add a callback
            self._cid_button_press_event = self._m.f.canvas.mpl_connect(
                "button_press_event", clickcb
            )
        if self._cid_button_release_event is None:
            # ------------- add a callback
            self._cid_button_release_event = self._m.f.canvas.mpl_connect(
                "button_release_event", releasecb
            )

    def _fwd_cb(self, event):
        """Forward *event* to all Maps-objects registered via forward_events."""
        # click container events are MouseEvents!
        if event.inaxes != self._m.ax:
            return
        if event.name == "button_release_event":
            for key, m in self._fwd_cbs.items():
                obj = self._getobj(m)
                if obj is None:
                    continue
                obj._onrelease(event)
        else:
            for key, m in self._fwd_cbs.items():
                obj = self._getobj(m)
                # BUG FIX: the None-check must come *before* using obj —
                # previously obj._clear_temporary_artists() was called first,
                # raising AttributeError if the target has no such container.
                if obj is None:
                    continue
                # clear all temporary artists that are still around
                obj._clear_temporary_artists()
                # transform the coordinates of the clicked location to the
                # plot-crs of the forwarded Maps-object
                transformer = Transformer.from_crs(
                    self._m.crs_plot,
                    m.crs_plot,
                    always_xy=True,
                )
                xdata, ydata = transformer.transform(event.xdata, event.ydata)
                dummymouseevent = SimpleNamespace(
                    inaxes=m.ax,
                    dblclick=event.dblclick,
                    button=event.button,
                    xdata=xdata,
                    ydata=ydata,
                    key=event.key,
                )
                obj._onclick(dummymouseevent)
class MoveContainer(ClickContainer):
    """
    Callbacks that are executed if you move the mouse without holding down a button.

    Methods
    -------
    attach : accessor for callbacks.
        Executing the functions will attach the associated callback to the map!
    get : accessor for return-objects
        A container to provide easy-access to the return-values of the callbacks.
    remove : remove prviously added callbacks from the map
    forward_events : forward events to connected maps-objects
    share_events : share events between connected maps-objects (e.g. forward both ways)
    set_sticky_modifiers : define keypress-modifiers that remain active after release
    """

    # this is just a copy of ClickContainer to manage motion-sensitive callbacks

    def __init__(self, button_down=False, *args, **kwargs):
        # button_down : if True, callbacks only run while a mouse-button is
        # pressed; if False, only while no button is pressed
        super().__init__(*args, **kwargs)
        self._cid_motion_event = None
        self._button_down = button_down

    def _init_cbs(self):
        # connect the motion-event only once (on the parent Maps-object)
        if self._m.parent is self._m:
            self._add_move_callback()

    def _reset_cids(self):
        # clear all temporary artists
        self._clear_temporary_artists()
        self._m.BM._clear_temp_artists(self._method)
        if self._cid_motion_event:
            self._m.f.canvas.mpl_disconnect(self._cid_motion_event)
            self._cid_motion_event = None

    def _add_move_callback(self):
        """Connect the "motion_notify_event" of the canvas to the callbacks."""

        def movecb(event):
            if not self._m.cb.get_execute_callbacks():
                return
            try:
                self._event = event
                # only execute movecb if a mouse-button is holded down
                # and only if the motion is happening inside the axes
                if self._button_down:
                    if not event.button:  # or (event.inaxes != self._m.ax):
                        # always clear temporary move-artists
                        if self._method == "move":
                            for obj in self._objs:
                                obj._clear_temporary_artists()
                            self._m.BM._clear_temp_artists(self._method)
                        return
                else:
                    if event.button:  # or (event.inaxes != self._m.ax):
                        # always clear temporary move-artists
                        if self._method == "move":
                            for obj in self._objs:
                                obj._clear_temporary_artists()
                            self._m.BM._clear_temp_artists(self._method)
                        return
                # don't execute callbacks if a toolbar-action is active
                if (
                    self._m.f.canvas.toolbar is not None
                ) and self._m.f.canvas.toolbar.mode != "":
                    return
                # execute onclick on the maps object that belongs to the clicked axis
                # and forward the event to all forwarded maps-objects
                update = False
                for obj in self._objs:
                    # check if there is a reason to update
                    if update is False:
                        if len(obj.get.attached_callbacks) > 0:
                            update = True
                    # clear temporary artists before executing new callbacks to avoid
                    # having old artists around when callbacks are triggered again
                    obj._clear_temporary_artists()
                    self._m.BM._clear_temp_artists(self._method)
                    obj._onclick(event)
                    # forward callbacks to the connected maps-objects
                    obj._fwd_cb(event)
                # only update if a callback is attached
                # (to avoid constantly calling update)
                if update:
                    if self._button_down:
                        if event.button:
                            self._m.parent.BM.update(clear=self._method)
                    else:
                        self._m.parent.BM.update(clear=self._method)
            except ReferenceError:
                # ignore errors caused by no-longer existing weakrefs
                pass

        if self._cid_motion_event is None:
            # for click-callbacks, allow motion-detection
            self._cid_motion_event = self._m.f.canvas.mpl_connect(
                "motion_notify_event", movecb
            )
class PickContainer(_ClickContainer):
    """
    Callbacks that select the nearest datapoint if you click on the map.

    The event will search for the closest data-point and execute the callback
    with the properties (e.g. position , ID, value) of the selected point.

    Note
    ----
    To speed up identification of points for very large datasets, the search
    is limited to points located inside a "search rectangle".
    The side-length of this rectangle is determined in the plot-crs and can be
    set via `m.cb.pick.set_props(search_radius=...)`.

    The default is to use a side-length of 50 times the dataset-radius.

    Methods
    -------
    attach : accessor for callbacks.
        Executing the functions will attach the associated callback to the map!
    get : accessor for return-objects
        A container to provide easy-access to the return-values of the callbacks.
    remove : remove previously added callbacks from the map
    forward_events : forward events to connected maps-objects
    share_events : share events between connected maps-objects (e.g. forward both ways)
    set_sticky_modifiers : define keypress-modifiers that remain active after release
    set_props : set the picking behaviour (e.g. number of points, search radius, etc.)
    """

    def __init__(self, picker_name="default", picker=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._cid_pick_event = dict()  # one mpl connection-id per method
        self._picker_name = picker_name
        self._artist = None  # the pickable artist (set via _set_artist)
        self._n_ids = 1  # number of nearest neighbours picked per event
        self._consecutive_multipick = False
        self._pick_relative_to_closest = True
        # a str value is interpreted as a multiplier of the data-radius
        self._search_radius = "50"
        if picker is None:
            self._picker = self._default_picker
        else:
            self._picker = picker

    def __getitem__(self, name):
        """Return the (named) pick-container registered via `m.cb.add_picker`."""
        name = str(name)
        if name.startswith("_"):
            container_name = "_pick__" + name[1:]
        else:
            container_name = "pick__" + name
        if hasattr(self._m.cb, container_name):
            return getattr(self._m.cb, container_name)
        else:
            # NOTE(review): the second positional argument is passed to
            # logging as a %-format argument although the message has no
            # placeholder — the hint text is likely never shown; verify.
            _log.error(
                f"the picker {name} does not exist...", "use `m.cb.add_picker` first!"
            )

    def set_props(
        self,
        n=None,
        consecutive_pick=None,
        pick_relative_to_closest=None,
        search_radius=None,
    ):
        """
        Set the picker-properties (number of picked points, max. search radius, etc.).

        Only provided arguments will be updated!

        Parameters
        ----------
        n : int, optional
            The number of nearest neighbours to pick at each pick-event.
            The default is 1.
        consecutive_pick : bool, optional
            - If True, pick-callbacks will be executed consecutively for each
              picked datapoint.
            - if False, pick-callbacks will get lists of all picked values
              as input-arguments
            The default is False.
        pick_relative_to_closest : bool, optional
            ONLY relevant if `n > 1`.
            - If True: pick (n) nearest neighbours based on the center of the
              closest identified datapoint
            - If False: pick (n) nearest neighbours based on the click-position
            The default is True.
        search_radius : int, float, str or None, optional
            Set the radius of the area that is used to limit the number of
            pixels when searching for nearest-neighbours.
            - if `int` or `float`, the radius of the circle in units of the plot_crs
            - if `str`, a multiplication-factor for the estimated data-radius.
              NOTE: The multiplied radius is defined in the plot projection!
              If the data was provided in a different projection, the radius
              estimated from the re-projected data is used (might be different
              from the actual shape radius!)
            The default is "50" (e.g. 50 times the data-radius).
        """
        if n is not None:
            self._n_ids = n
        if consecutive_pick is not None:
            self._consecutive_multipick = consecutive_pick
        if pick_relative_to_closest is not None:
            self._pick_relative_to_closest = pick_relative_to_closest
        if search_radius is not None:
            self._search_radius = search_radius

    def _set_artist(self, artist):
        # remember the artist and make it pickable with our picker-function
        self._artist = artist
        self._artist.set_picker(self._picker)

    def _init_cbs(self):
        # if self._m.parent is self._m:
        self._add_pick_callback()

    def _default_picker(self, artist, event):
        """
        Default matplotlib picker-function: identify the clicked datapoint(s)
        via the KD-tree of the Maps-object.

        Returns the matplotlib picker-contract tuple ``(hit, props)``.
        """
        # make sure that objects are only picked if we are on the right layer
        if not self._execute_cb(self._m.layer):
            return False, None
        try:
            # if no pick-callback is attached, don't identify the picked point
            if len(self.get.cbs) == 0:
                return False, None
        except ReferenceError:
            # in case we encounter a reference-error, remove the picker from the artist
            # (happens if the picker originates from a no-longer existing Maps object)
            self._artist.set_picker(None)
            return False, None
        if (event.inaxes != self._m.ax) or not hasattr(self._m, "tree"):
            return False, dict(ind=None, dblclick=event.dblclick, button=event.button)
        # make sure non-finite coordinates (resulting from projections in
        # forwarded callbacks) don't lead to issues
        if not np.isfinite((event.xdata, event.ydata)).all():
            return False, dict(ind=None, dblclick=event.dblclick, button=event.button)
        # update the search-radius if necessary
        # (do this here to allow setting a multiplier for the dataset-radius
        # without having to plot it first!)
        if self._search_radius != self._m.tree._search_radius:
            self._m.tree.set_search_radius(self._search_radius)
        # find the closest point to the clicked pixel
        index = self._m.tree.query(
            (event.xdata, event.ydata),
            k=self._n_ids,
            pick_relative_to_closest=self._pick_relative_to_closest,
        )
        if index is not None:
            pos = self._m._data_manager._get_xy_from_index(index, reprojected=True)
            val = self._m._data_manager._get_val_from_index(index)
            ID = self._m._data_manager._get_id_from_index(index)
            try:
                val_color = artist.cmap(artist.norm(val))
            except Exception:
                val_color = None
            return True, dict(
                dblclick=event.dblclick,
                button=event.button,
                ind=index,
                ID=ID,
                pos=pos,
                val=val,
                val_color=val_color,
            )
        else:
            # do this to "unpick" previously picked datapoints if you click
            # outside the data-extent
            return True, dict(ind=None, dblclick=event.dblclick, button=event.button)
        # NOTE(review): unreachable — both branches above already return
        return False, None

    def _get_pickdict(self, event):
        """
        Convert a (possibly forwarded) pick-event into the kwargs-dict(s)
        that are passed to the attached pick-callbacks.

        Returns a single dict (consecutive-multipick off), a list of dicts
        (consecutive-multipick on), or None if nothing was picked.
        """
        event_ind = event.ind
        n_inds = len(np.atleast_1d(event_ind))
        # mouseevent = event.mouseevent
        # fallback value(s) used for attributes missing on the event
        noval = [None] * n_inds if n_inds > 1 else None
        ID = getattr(event, "ID", noval)
        pos = getattr(event, "pos", noval)
        val = getattr(event, "val", noval)
        ind = getattr(event, "ind", noval)
        val_color = getattr(event, "val_color", noval)
        if ind is not None:
            if self._consecutive_multipick is False:
                # return all picked values as arrays
                clickdict = dict(
                    ID=ID,  # convert IDs to numpy-arrays!
                    pos=pos,
                    val=val,
                    ind=ind,
                    val_color=val_color,
                    picker_name=self._picker_name,
                )
                return clickdict
            else:
                # one dict per picked datapoint
                if n_inds > 1:
                    clickdicts = []
                    for i in range(n_inds):
                        clickdict = dict(
                            ID=ID[i],
                            pos=(pos[0][i], pos[1][i]),
                            val=val[i],
                            ind=ind[i],
                            val_color=val_color[i],
                            picker_name=self._picker_name,
                        )
                        clickdicts.append(clickdict)
                else:
                    clickdicts = [
                        dict(
                            ID=ID,  # convert IDs to numpy-arrays!
                            pos=pos,
                            val=val,
                            ind=ind,
                            val_color=val_color,
                            picker_name=self._picker_name,
                        )
                    ]
                return clickdicts

    def _onpick(self, event):
        """Execute the callbacks attached for the button/modifier of the pick-event."""
        if event.artist is not self._artist:
            return
        # only execute onpick if the correct layer is visible
        # (relevant for forwarded callbacks)
        if not self._execute_cb(self._m.layer):
            return
        # don't execute callbacks if a toolbar-action is active
        if (
            self._m.f.canvas.toolbar is not None
        ) and self._m.f.canvas.toolbar.mode != "":
            return
        # make sure temporary artists are cleared before executing new callbacks
        # to avoid having old artists around when callbacks are triggered again
        self._clear_temporary_artists()
        self._m.BM._clear_temp_artists(self._method)
        clickdict = self._get_pickdict(event)
        if event.mouseevent.dblclick:
            cbs = self.get.cbs.get("double", dict())
        else:
            cbs = self.get.cbs.get("single", dict())
        # check for keypress-modifiers
        if (
            event.mouseevent.key is None
            and self._m.cb.keypress._modifier in self._sticky_modifiers
        ):
            # in case sticky_modifiers are defined, use the last pressed modifier
            event_key = self._m.cb.keypress._modifier
        else:
            event_key = event.mouseevent.key
        button_modifier = f"{event.mouseevent.button}__{event_key}"
        if button_modifier in cbs:
            bcbs = cbs[button_modifier]
            for key in self._sort_cbs(bcbs):
                # callback-keys are "<name>__<layer>"
                layer = key.split("__", 1)[1]
                if not self._execute_cb(layer):
                    # only execute callbacks if the layer name of the associated
                    # maps-object is active
                    return
                cb = bcbs[key]
                if clickdict is not None:
                    if self._consecutive_multipick is False:
                        cb(**clickdict)
                    else:
                        # execute the callback once per picked datapoint
                        for c in clickdict:
                            cb(**c)

    def _reset_cids(self):
        # clear all temporary artists
        self._clear_temporary_artists()
        self._m.BM._clear_temp_artists(self._method)
        # disconnect all pick-event handlers
        for method, cid in self._cid_pick_event.items():
            self._m.f.canvas.mpl_disconnect(cid)
        self._cid_pick_event.clear()

    def _add_pick_callback(self):
        """Connect the matplotlib "pick_event" handler (once per method)."""

        # execute onpick and forward the event to all connected Maps-objects
        def pickcb(event):
            if not self._m.cb.get_execute_callbacks():
                return
            try:
                # make sure pickcb is only executed if we are on the right layer
                if not self._execute_cb(self._m.layer):
                    return
                # don't execute callbacks if a toolbar-action is active
                if (
                    self._m.f.canvas.toolbar is not None
                ) and self._m.f.canvas.toolbar.mode != "":
                    return
                if not self._artist is event.artist:
                    return
                self._event = event
                # execute "_onpick" on the maps-object that belongs to the clicked axes
                # and forward the event to all forwarded maps-objects
                self._onpick(event)
                # forward callbacks to the connected maps-objects
                self._fwd_cb(event, self._picker_name)
                # don't update here... the click-callback will take care of it!
            except ReferenceError:
                # the Maps-object no longer exists -> ignore the event
                pass

        # attach the callbacks (only once per method!)
        if self._method not in self._cid_pick_event:
            self._cid_pick_event[self._method] = self._m.f.canvas.mpl_connect(
                "pick_event", pickcb
            )

    def _fwd_cb(self, event, picker_name):
        """
        Re-create the pick-event in the crs of each connected Maps-object and
        execute its _onpick with a synthetic (SimpleNamespace) event.
        """
        # PickEvents have a .mouseevent property for the associated MouseEvent!
        if event.mouseevent.inaxes != self._m.ax:
            return
        for key, m in self._fwd_cbs.items():
            obj = self._getobj(m)
            if obj is None:
                continue
            transformer = Transformer.from_crs(
                self._m.crs_plot,
                m.crs_plot,
                always_xy=True,
            )
            # transform the coordinates of the clicked location to the
            # crs of the map
            xdata, ydata = transformer.transform(
                event.mouseevent.xdata, event.mouseevent.ydata
            )
            dummymouseevent = SimpleNamespace(
                inaxes=m.ax,
                dblclick=event.mouseevent.dblclick,
                button=event.mouseevent.button,
                xdata=xdata,
                ydata=ydata,
                key=event.mouseevent.key,
                # x=event.mouseevent.x,
                # y=event.mouseevent.y,
            )
            dummyevent = SimpleNamespace(
                artist=obj._artist,
                dblclick=event.mouseevent.dblclick,
                button=event.mouseevent.button,
                # inaxes=m.ax,
                mouseevent=dummymouseevent,
                # picker_name=picker_name,
            )
            # let the target-object identify its own picked datapoint
            pick = obj._picker(obj._artist, dummymouseevent)
            if pick[1] is not None:
                dummyevent.ID = pick[1].get("ID", None)
                dummyevent.ind = pick[1].get("ind", None)
                dummyevent.val = pick[1].get("val", None)
                dummyevent.pos = pick[1].get("pos", None)
                dummyevent.val_color = pick[1].get("val_color", None)
            else:
                dummyevent.ind = None
            obj._onpick(dummyevent)
class KeypressContainer(_CallbackContainer):
    """
    Callbacks that are executed if you press a key on the keyboard.

    Methods
    -------
    attach : accessor for callbacks.
        Executing the functions will attach the associated callback to the map!
    get : accessor for return-objects
        A container to provide easy-access to the return-values of the callbacks.
    remove : remove previously added callbacks from the map
    forward_events : forward events to connected maps-objects
    share_events : share events between connected maps-objects (e.g. forward both ways)
    set_sticky_modifiers : define keypress-modifiers that remain active after release
    """

    def __init__(self, m, cb_cls=None, method="keypress"):
        super().__init__(m, cb_cls, method)
        self._cid_keypress_event = None
        # remember last pressed key (for use as "sticky_modifier")
        self._modifier = None

    def _init_cbs(self):
        # keypress-events are only connected on the parent Maps-object
        if self._m.parent is self._m:
            self._initialize_callbacks()

    def _reset_cids(self):
        # clear all temporary artists
        self._clear_temporary_artists()
        self._m.BM._clear_temp_artists(self._method)
        if self._cid_keypress_event:
            self._m.f.canvas.mpl_disconnect(self._cid_keypress_event)
        self._cid_keypress_event = None

    def _initialize_callbacks(self):
        """Connect the matplotlib "key_press_event" handler (parent object only)."""

        def _onpress(event):
            if not self._m.cb.get_execute_callbacks():
                return
            try:
                self._event = event
                # remember keypress event in case sticky modifiers are used for
                # click or pick callbacks
                k = str(event.key)
                # "ctrl+<modifier>" or "escape" releases the sticky modifier
                if self._modifier is not None and (
                    k == "ctrl+" + self._modifier or k == "escape"
                ):
                    self._modifier = None
                    _log.info("EOmaps: sticky modifier set to: None")
                elif self._modifier != k:
                    # collect the methods for which k is a sticky modifier
                    methods = []
                    if k in self._m.cb.click._sticky_modifiers:
                        methods.append("click")
                    if k in self._m.cb.pick._sticky_modifiers:
                        methods.append("pick")
                    if k in self._m.cb.move._sticky_modifiers:
                        methods.append("move")
                    if methods:
                        _log.info(
                            "EOmaps: sticky modifier set to: "
                            f"{k} ({', '.join(methods)})"
                        )
                        self._modifier = k
                for obj in self._objs:
                    # only trigger callbacks on the right layer
                    if not self._execute_cb(obj._m.layer):
                        continue
                    # callbacks registered for this key or for "any key" (None)
                    if any(i in obj.get.cbs for i in (event.key, None)):
                        # do this to allow deleting callbacks with a callback
                        # otherwise modifying a dict during iteration is problematic!
                        cbs = {
                            **obj.get.cbs.get(event.key, dict()),
                            **obj.get.cbs.get(None, dict()),
                        }
                        names = list(cbs)
                        for name in names:
                            if name in cbs:
                                cbs[name](key=event.key)
                # DO NOT UPDATE in here!
                # otherwise keypress modifiers for peek-layer callbacks will
                # have glitches!
                # self._m.parent.BM.update(clear=self._method)
            except ReferenceError:
                # the Maps-object no longer exists -> ignore the event
                pass

        if self._m is self._m.parent:
            self._cid_keypress_event = self._m.f.canvas.mpl_connect(
                "key_press_event", _onpress
            )

    class _attach:
        """
        Attach custom or pre-defined callbacks on keypress events.

        Each callback takes 1 additional keyword-arguments:

        key : str or None
            The key to use.
            - Modifiers are attached with a '+', e.g. "alt+d"
            - If None, the callback will be fired on any key!

        For additional keyword-arguments check the doc of the callback-functions!

        Examples
        --------
        Attach a pre-defined callback:

        >>> m.cb.keypress.attach.switch_layer(layer=1, key="1")

        Attach a custom callback:

        >>> def cb(**kwargs):
        >>>     ... do something ...
        >>>
        >>> m.cb.keypress.attach(cb, key="3")
        """

        def __init__(self, parent):
            self._parent = parent
            # attach pre-defined callbacks
            # (each bound function forwards to parent._add_callback but keeps
            # the metadata of the underlying callback via update_wrapper)
            for cb in self._parent._cb_list:
                setattr(
                    self,
                    cb,
                    update_wrapper(
                        partial(self._parent._add_callback, callback=cb),
                        getattr(self._parent._cb, cb),
                    ),
                )

        def __call__(self, f, key, **kwargs):
            """
            Add a custom callback-function to the map.

            Parameters
            ----------
            f : callable
                the function to attach to the map.
                The call-signature is:

                >>> def some_callback(asdf, **kwargs):
                >>>     print("hello world, asdf=", asdf)
                >>>
                >>> m.cb.attach(some_callback, asdf=1)
            key : str or None
                The key to use.
                - Modifiers are attached with a '+', e.g. "alt+d"
                - If None, the callback will be fired on any key!
            **kwargs :
                kwargs passed to the callback-function
                For documentation of the individual functions check the docs in `m.cb`

            Returns
            -------
            cid : int
                the ID of the attached callback
            """
            if key is not None and not isinstance(key, str):
                raise TypeError(
                    "EOmaps: The 'key' for keypress-callbacks must be a string!"
                )
            return self._parent._add_callback(f, key, **kwargs)

    class _get:
        # lightweight accessor for the registered callbacks of the container
        def __init__(self, parent):
            self.m = parent._m
            self.cb = parent._cb
            self.cbs = dict()

        @property
        def attached_callbacks(self):
            # full callback-identifiers ("<name>__<layer>__<key>")
            cbs = []
            for key, cbdict in self.cbs.items():
                for name, cb in cbdict.items():
                    cbs.append(f"{name}__{key}")
            return cbs

    def _parse_cid(self, cid):
        # split "<name>__<layer>__<key>" into its components
        name, rest = cid.split("__", 1)
        layer, key = rest.rsplit("__", 1)
        return name, layer, key

    def remove(self, callback=None):
        """
        Remove an attached callback from the figure.

        Parameters
        ----------
        callback : int, str or tuple
            if str: the name of the callback to remove
            (`<function_name>_<count>__<layer>__<key>`)
        """
        if callback is not None:
            name, layer, key = self._parse_cid(callback)
            cbname = name + "__" + layer
            cbs = self.get.cbs.get(key, None)
            if cbs is not None:
                if cbname in cbs:
                    del cbs[cbname]
                    # call cleanup methods on removal
                    fname = name.rsplit("_", 1)[0]
                    if hasattr(self._cb, f"_{fname}_cleanup"):
                        getattr(self._cb, f"_{fname}_cleanup")()
                else:
                    _log.error(f"EOmaps: there is no callback named {callback}")
            else:
                _log.error(f"EOmaps: there is no callback named {callback}")

    def _add_callback(self, callback, key="x", **kwargs):
        """
        Attach a callback to the plot that will be executed if a key is pressed.

        A list of pre-defined callbacks (accessible via `m.cb`) or customly defined
        functions can be used.

        >>> # to add a pre-defined callback use:
        >>> cid = m._add_callback("annotate", <kwargs passed to m.cb.annotate>)
        >>> # to remove the callback again, call:
        >>> m.remove_callback(cid)

        Parameters
        ----------
        callback : callable or str
            The callback-function to attach.
        key : str or None
            The key to use.
            - Modifiers are attached with a '+', e.g. "alt+d"
            - If None, the callback will be fired on any key!
        **kwargs :
            kwargs passed to the callback-function
            For documentation of the individual functions check the docs in `m.cb`

        Returns
        -------
        cbname : str
            the identification string of the callback
            (to remove the callback, use `m.cb.remove(cbname)`)
        """
        if isinstance(callback, str):
            assert hasattr(self._cb, callback), (
                f"The function '{callback}' does not exist as a pre-defined callback."
                + " Use one of:\n - "
                + "\n - ".join(self._cb_list)
            )
            callback = getattr(self._cb, callback)
        cbdict = self.get.cbs.setdefault(key, dict())
        # get a unique name for the callback
        # (count previously attached callbacks of the same function)
        ncb = [
            int(i.rsplit("__", 1)[0].rsplit("_", 1)[1])
            for i in cbdict
            if i.startswith(callback.__name__)
        ]
        cbkey = (
            callback.__name__
            + f"_{max(ncb) + 1 if len(ncb) > 0 else 0}"
            + f"__{self._m.layer}"
        )
        # append the callback
        cbdict[cbkey] = partial(callback, **kwargs)
        return cbkey + f"__{key}"
class CallbackContainer:
    """
    Accessor for attaching callbacks and accessing return-objects.

    Methods
    -------
    - **click** : Execute functions when clicking on the map
    - **pick** : Execute functions when you "pick" a pixel on the map
      - only available if a dataset has been plotted via `m.plot_map()`
    - **keypress** : Execute functions if you press a key on the keyboard
    """

    def __init__(self, m):
        self._m = m
        # names of all attached callback-containers
        # (extended dynamically by add_picker)
        self._methods = {"click", "pick", "move", "keypress", "_click_move"}

        self._click = ClickContainer(
            m=self._m,
            cb_cls=ClickCallbacks,
            method="click",
        )
        # a move-container that shares temporary artists with the click-container
        self._click_move = MoveContainer(
            m=self._m,
            cb_cls=ClickCallbacks,
            method="_click_move",
            parent_container=self._click,
            button_down=True,
        )
        self._move = MoveContainer(
            m=self._m,
            cb_cls=MoveCallbacks,
            method="move",
            button_down=False,
            default_button=None,
        )
        self._pick = PickContainer(
            m=self._m,
            cb_cls=PickCallbacks,
            method="pick",
        )
        self._keypress = KeypressContainer(
            m=self._m,
            cb_cls=KeypressCallbacks,
            method="keypress",
        )

    def get_execute_callbacks(self):
        """
        Get if callbacks should be executed or not.

        Returns
        -------
        bool
            If True, callbacks are executed.
        """
        return self._m.parent._execute_callbacks

    def execute_callbacks(self, val):
        """
        Activate / deactivate triggering callbacks.

        Parameters
        ----------
        val : bool
            If True, callbacks will be executed.
        """
        self._m.parent._execute_callbacks = val

    @property
    @wraps(ClickContainer)
    def click(self):
        """Attach click callbacks."""
        return self._click

    @property
    @wraps(MoveContainer)
    def move(self):
        """Attach move callbacks."""
        return self._move

    @property
    @wraps(PickContainer)
    def pick(self):
        """Attach pick callbacks."""
        return self._pick

    @property
    @wraps(KeypressContainer)
    def keypress(self):
        """Attach keypress callbacks."""
        return self._keypress

    def add_picker(self, name, artist, picker):
        """
        Attach a custom picker to an artist.

        Once attached, callbacks can be assigned just like the default
        click/pick callbacks via:

        >>> m.cb.pick__<name>. ...

        Parameters
        ----------
        name : str, optional
            a unique identifier that will be used to identify the pick method.
        artist : a matplotlib artist, optional
            the artist that should become pickable.
            (it must support `artist.set_picker()`)
            The default is None.
        picker : callable, optional
            A callable that is used to perform the picking.
            The default is None, in which case the default picker is used.
            The call-signature is:

            >>> def picker(artist, mouseevent):
            >>>     # if the pick is NOT successful:
            >>>     return False, dict()
            >>>     ...
            >>>     # if the pick is successful:
            >>>     return True, dict(ID, pos, val, ind)

        Note
        ----
        If the name starts with an underscore (e.g. "_MyPicker") then the
        associated container will be accessible via `m._cb._pick__MyPicker`
        or via `m.cb.pick["_MyPicker"]`. (This is useful to setup pickers that
        are only used internally)
        """
        name = str(name)
        if picker is not None:
            assert name != "default", "'default' is not a valid picker name!"

        # a picker with this name must not already be attached
        # BUGFIX: the assertion message was missing its f-prefix, so the
        # literal text "{name}" was shown instead of the actual picker name.
        assert not hasattr(
            self._m.cb, name
        ), f"the picker '{name}' is already attached!"

        if name == "default":
            method = "pick"
        else:
            if name.startswith("_"):
                method = "_pick__" + name[1:]
            else:
                method = "pick__" + name

        new_pick = PickContainer(
            m=self._m,
            cb_cls=PickCallbacks,
            method=method,
            picker_name=name,
            picker=picker,
        )
        # BUGFIX: this statement previously used "==" (a no-op comparison);
        # use "=" so the new container actually inherits the docstring.
        new_pick.__doc__ = PickContainer.__doc__
        new_pick._set_artist(artist)
        new_pick._init_cbs()

        # add the picker method to the accessible cbs
        setattr(self._m.cb, new_pick._method, new_pick)
        self._methods.add(new_pick._method)

        return new_pick

    def _init_cbs(self):
        # initialize all attached callback-containers
        for method in self._methods:
            obj = getattr(self, method)
            obj._init_cbs()
        self._remove_default_keymaps()

    def _clear_callbacks(self):
        # clear all callback containers
        for method in self._methods:
            obj = getattr(self, method)
            obj.get.cbs.clear()

    def _reset_cids(self):
        # reset the callback functions (required to re-attach the callbacks
        # in case the figure is closed and re-initialized)
        for method in self._methods:
            obj = getattr(self, method)
            obj._reset_cids()

    @staticmethod
    def _remove_default_keymaps():
        # unattach default keymaps to avoid interaction with keypress events
        assignments = dict()
        assignments["keymap.back"] = ["c", "left"]
        assignments["keymap.forward"] = ["v", "right"]
        assignments["keymap.grid"] = ["g"]
        assignments["keymap.grid_minor"] = ["G"]
        assignments["keymap.home"] = ["h", "r"]
        assignments["keymap.pan"] = ["p"]
        assignments["keymap.quit"] = ["q"]
        assignments["keymap.save"] = ["s"]
        assignments["keymap.xscale"] = ["k", "L"]
        assignments["keymap.yscale"] = ["l"]
        for key, val in assignments.items():
            for v in val:
                try:
                    plt.rcParams[key].remove(v)
                except Exception:
                    # key not assigned (or already removed) -> nothing to do
                    pass
/Flask-Discord-Interactions-2.1.2.tar.gz/Flask-Discord-Interactions-2.1.2/flask_discord_interactions/models/embed.py | from dataclasses import dataclass, asdict
from typing import List
from flask_discord_interactions.models.utils import LoadableDataclass
@dataclass
class Footer(LoadableDataclass):
    "Represents the footer of an Embed."
    # Fields left as None are dropped when serialized via Embed.dump().
    text: str
    icon_url: str = None
    proxy_icon_url: str = None
@dataclass
class Field(LoadableDataclass):
    "Represents a field on an Embed."
    name: str
    value: str
    inline: bool = False  # render side-by-side with other inline fields
@dataclass
class Media(LoadableDataclass):
    "Represents a thumbnail, image, or video on an Embed."
    # All fields are optional; None values are dropped by Embed.dump().
    url: str = None
    proxy_url: str = None
    height: int = None
    width: int = None
@dataclass
class Provider(LoadableDataclass):
    "Represents a provider of an Embed."
    name: str = None
    url: str = None
@dataclass
class Author(LoadableDataclass):
    "Represents an author of an embed."
    name: str = None
    url: str = None
    icon_url: str = None
    proxy_icon_url: str = None
@dataclass
class Embed(LoadableDataclass):
    """
    Represents an Embed to be sent as part of a Message.

    Attributes
    ----------
    title
        The title of the embed.
    description
        The description in the embed.
    url
        The URL that the embed title links to.
    timestamp
        An ISO8601 timestamp included in the embed.
    color
        An integer representing the color of the sidebar of the embed.
    footer
        A :class:`Footer` representing the footer of the embed.
    image
        A :class:`Media` representing the image of the embed.
    thumbnail
        A :class:`Media` representing the thumbnail of the embed.
    video
        A :class:`Media` representing the video of the embed.
    provider
        A :class:`Provider` representing the name and URL of the provider of
        the embed.
    author
        A :class:`Author` representing the author of the embed.
    fields
        A list of :class:`Field` objects representing the fields of the embed.
    """

    title: str = None
    description: str = None
    url: str = None
    timestamp: str = None
    color: int = None
    footer: Footer = None
    image: Media = None
    thumbnail: Media = None
    video: Media = None
    provider: Provider = None
    author: Author = None
    fields: List[Field] = None

    def dump(self):
        """
        Returns this Embed as a dictionary, removing fields which are None.

        The cleanup is applied recursively: nested dictionaries (footer,
        author, ...) have their None entries removed, and list elements
        (e.g. the dicts inside ``fields``) are cleaned in turn.

        Returns
        -------
        dict
            A dictionary representation of this Embed.
        """

        def filter_none(d):
            if isinstance(d, dict):
                return {k: filter_none(v) for k, v in d.items() if v is not None}
            elif isinstance(d, list):
                # BUGFIX: lists were previously returned unchanged, so None
                # values inside list elements (e.g. field dicts) survived
                # the documented "remove None" cleanup.
                return [filter_none(v) for v in d]
            else:
                return d

        return filter_none(asdict(self))
/NeodroidAgent-0.4.8-py36-none-any.whl/neodroidagent/agents/numpy_agents/model_free/tabular_q_agent.py | import typing
from collections import defaultdict
from itertools import count
from typing import Any, Tuple
import gym
import numpy
from tqdm import tqdm
from neodroid.environments.gym_environment import NeodroidGymEnvironment
from neodroidagent.agents.numpy_agents.numpy_agent import NumpyAgent
class TabularQAgent(NumpyAgent):
    """
    Agent implementing tabular Q-learning.

    Q-values are kept in a ``defaultdict`` that maps a (stringified) state to
    a numpy array with one entry per discrete action; missing states are
    initialised with gaussian noise (``_init_mean`` / ``_init_std``).
    """

    # region Private

    def __defaults__(self) -> None:
        """Set the default hyper-parameters of the agent."""
        self._action_n = 6  # overwritten in __build__ from the action space
        self._init_mean = 0.0  # mean of the random Q-value initialisation
        self._init_std = 0.1  # std-dev of the random Q-value initialisation
        self._learning_rate = 0.6
        self._discount_factor = 0.95

    # endregion

    # region Public

    def update(self, *args, **kwargs) -> None:
        """No-op: the Q-table is updated incrementally inside :meth:`rollout`."""
        pass

    def evaluate(self, batch, *args, **kwargs) -> Any:
        """No-op: batch evaluation is not implemented for the tabular agent."""
        pass

    def load(self, *args, **kwargs) -> None:
        """No-op: loading a saved Q-table is not implemented."""
        pass

    def save(self, *args, **kwargs) -> None:
        """No-op: saving the Q-table is not implemented."""
        pass

    def sample(self, state, **kwargs):
        """
        Return an action for ``state``.

        The state is stringified first so it can be used as a dictionary key
        of the Q-table.
        """
        if not isinstance(state, str):
            state = str(state)
        return super().sample(state)

    def sample_random_process(self):
        """Sample a random action from the connected environment's action space."""
        if hasattr(
            self._last_connected_environment.action_space, "signed_one_hot_sample"
        ):
            return self._last_connected_environment.action_space.signed_one_hot_sample()
        else:
            return self._last_connected_environment.action_space.sample()

    def rollout(
        self, initial_state, environment, *, train=True, render=False, **kwargs
    ) -> Any:
        """
        Run a single episode, applying the tabular Q-learning update
        ``Q[s][a] += lr * (r + gamma * max(Q[s']) - Q[s][a])`` at every step.

        Returns
        -------
        tuple
            ``(episode_return, steps_taken)``
        """
        obs = initial_state
        ep_r = 0
        steps = 0
        for t in count():
            action = self.sample(obs)
            next_obs, signal, terminal, _ = environment.act(action)
            next_obs = str(next_obs)
            # BUGFIX: the update previously indexed the defaultdict with a
            # tuple key (self._q_table[obs, action]) while the policy reads
            # per-state arrays (self._q_table[state]) -- so updates never
            # influenced action selection. Index the state's value array
            # instead.
            current_q = self._q_table[obs][action]
            future = numpy.max(self._q_table[next_obs])
            exp_q = signal + self._discount_factor * future
            diff = self._learning_rate * (exp_q - current_q)
            self._q_table[obs][action] = current_q + diff
            obs = next_obs
            ep_r += signal
            if terminal:
                steps = t
                break
        return ep_r, steps

    def train_episodically(
        self,
        env,
        *,
        rollouts=1000,
        render=False,
        render_frequency=100,
        stat_frequency=10,
        **kwargs
    ):
        """
        Train the agent for ``rollouts`` episodes on ``env``.

        Returns
        -------
        defaultdict
            The learned Q-table.
        """
        obs = env.reset()
        obs = str(obs)
        for i in range(rollouts):
            episode_signal, steps = self.rollout(obs, env)
            obs = env.reset()
            # BUGFIX: stringify the reset observation for every episode (not
            # only the first one) so Q-table keys stay consistent/hashable.
            obs = str(obs)
        return self._q_table

    # endregion

    # region Protected

    def _sample_model(self, state, *args, **kwargs) -> Any:
        # empty state (e.g. "" after stringification) -> fallback action
        if isinstance(state, typing.Collection) and len(state) == 0:
            return [0]
        return numpy.argmax(self._q_table[state])

    def _optimise_wrt(self, error, *args, **kwargs) -> None:
        """No-op: there is no parametric model to optimise."""
        pass

    def __build__(self, env, **kwargs) -> None:
        # NOTE(review): uses self._last_connected_environment rather than the
        # `env` argument -- presumably set by the NumpyAgent base; verify.
        if hasattr(self._last_connected_environment.action_space, "num_binary_actions"):
            self._action_n = (
                self._last_connected_environment.action_space.num_binary_actions
            )
        else:
            self._action_n = self._last_connected_environment.action_space.n
        # lazily initialise unseen states with small gaussian noise
        self._q_table = defaultdict(
            lambda: self._init_std * numpy.random.randn(self._action_n)
            + self._init_mean
        )

    def _train_procedure(self, env, rollouts=10000, *args, **kwargs) -> Tuple[Any, Any]:
        # BUGFIX: train_episodically returns the Q-table itself; the previous
        # star-unpacking (`model, *stats = ...`) iterated the dict's keys and
        # returned a state-key string as the "model".
        model = self.train_episodically(env, rollouts=rollouts)
        return model, []

    # endregion
# endregion
# region Test
def tabular_test():
    """
    Smoke-test: build a TabularQAgent on the neodroid "mab" environment
    and run a training session on it.
    """
    env = NeodroidGymEnvironment(environment_name="mab")
    agent = TabularQAgent(
        observation_space=env.observation_space,
        action_space=env.action_space,
        environment=env,
    )
    agent.build(env)
    agent.train(env, env)
if __name__ == "__main__":

    def taxi():
        """Self-contained epsilon-greedy Q-learning demo on gym's Taxi environment."""
        import gym
        import numpy
        import random

        env = gym.make("Taxi-v2").env
        # BUGFIX: gym environments expose `observation_space` (there is no
        # `env.space` attribute).
        q_table = numpy.zeros([env.observation_space.n, env.action_space.n])

        def training():
            """Run 10000 episodes of tabular Q-learning on ``env``."""
            # Hyperparameters
            discount = 0.9  # Discount
            lr = 0.1  # learning rate
            epsilon = 0.1
            max_epsilon = 1.0
            min_epsilon = 0.01
            penalities = 0
            sess = tqdm(range(10000))
            for i in sess:
                state = env.reset()
                epochs, penalites, reward = 0, 0, 0
                done = False
                while not done:
                    # epsilon-greedy action selection
                    if random.uniform(0, 1) < epsilon:
                        action = env.action_space.sample()
                    else:
                        action = numpy.argmax(q_table[state])
                    # BUGFIX: gym environments are stepped via `step`, not `act`
                    next_state, reward, done, info = env.step(action)
                    next_max = numpy.max(q_table[next_state])
                    q_table[state, action] = q_table[state, action] + lr * (
                        reward + discount * next_max - q_table[state, action]
                    )
                    if reward == -10:
                        penalities += 1
                    state = next_state
                    epochs += 1
                # BUGFIX: decay epsilon with the episode index (the previous
                # code fed epsilon into its own decay, which never decays
                # towards min_epsilon)
                epsilon = min_epsilon + (max_epsilon - min_epsilon) * numpy.exp(
                    -0.1 * i
                )
                if i % 100 == 0:
                    print(env.render())
            print("Training Finished..")

        training()

    def main():
        """Train a TabularQAgent on the FrozenLake gym environment."""
        # env = PuddleWorld(
        #  world_file_path='/home/heider/Neodroid/agent/draugr_utilities/exclude/saved_maps/PuddleWorldA.dat')
        env = gym.make("FrozenLake-v0")
        agent = TabularQAgent(
            # BUGFIX: `observation_space`, not the nonexistent `env.space`
            observation_space=env.observation_space,
            action_space=env.action_space,
            environment=env,
        )
        agent.build(env)
        agent.train(env, env)

    tabular_test()
# endregion | PypiClean |
/LogWatcher-1.1.1.tar.gz/LogWatcher-1.1.1/logwatcher/logwatcher.py |
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time,os,re,sys,atexit,ConfigParser
import signal
code_version="$Id: logwatcher.py 233274 2014-06-23 23:20:52Z heitritterw $";
class LogWatcher:
    def __init__(self, pidfile=None, daemonize=0, configfile=None, distinguisher=None, debug=0, quit=False, beginning=False, testconfig=False, graphite_server=None, use_graphite=False):
        """Configure the watcher, create the built-in metrics and start watching.

        Side effects: reads the config file, optionally daemonizes, writes the
        pidfile, and finally calls self.watch(), which loops until the process
        is killed (or exits after one flush when quit=True).
        """
        self.log=""
        self.fd=None
        self.graphite_server=graphite_server
        # an explicit server implies graphite; otherwise honor the flag
        if self.graphite_server:
            self.use_graphite = True
        else:
            self.use_graphite = use_graphite
        # initializing, will be populated later
        self.plugin_list=[]
        self.plugins=[]
        self.plugin_dir=None
        self.plugin_paths = ["/app/logwatcher/plugins", os.path.dirname(__file__)+"/plugins"]
        self.gmetric_brands={}
        self.regex={}
        self.gmetric={}
        # metrics that count matching lines
        self.metric_counts={}
        # metrics that sum values found
        self.metric_sums={}
        # metrics that are calculated from other metrics
        self.metric_calcs={}
        self.metric_calc_expr={}
        # metrics that describe distributions
        self.metric_dists={}
        self.metric_dist_bucketsize={}
        self.metric_dist_bucketcount={}
        self.ignore_pattern=""
        self.ignore=None
        self.configfile=configfile
        self.debug=debug
        self.pidfile=pidfile
        self.distinguisher=distinguisher
        self.quit=quit
        self.beginning=beginning
        self.testconfig=testconfig
        # self-instrumentation timers (time spent parsing / notifying)
        self.log_time=0
        self.log_time_start=0
        self.notify_time=0
        self.notify_time_start=0
        self.readConfig()
        # SIGHUP re-reads the configuration in place
        signal.signal(signal.SIGHUP, self.reReadConfig)
        self.new_metric_count=0 # counts new-found dynamic metrics
        self.total_metric_count=0 # counts metrics sent
        self.prefix_root="LW_"
        self.prefix=self.prefix_root
        if self.distinguisher:
            self.prefix="%s%s_" % (self.prefix, self.distinguisher)
        self.daemonize=daemonize
        # only daemonize when no previous instance holds the pidfile
        if self.getPID() < 1:
            if self.daemonize == 1:
                procdaemonize()
        if self.lockPID() == 0:
            print "Pidfile found"
            sys.exit(-1)
        self.log_count=0 # how many different logs have we opened?
        self.curr_pos=0
        self.prev_pos=0
        self.last_time=time.time()
        # -G without -g: look the server up in /etc/graphite.conf
        if self.use_graphite and not self.graphite_server:
            self.graphite_server = self.readGraphiteConf()
            if not self.graphite_server:
                print >> sys.stderr, "ERROR: Failed to set graphite server. Using gmetric."
            else:
                self.use_graphite = True
        self.brand_counts={}
        # pick the metric backend; both modules expose the same gMetric API
        if self.graphite_server:
            from graphitelib import gMetric
        else:
            from gmetriclib import gMetric
        # built-in metrics, always sent regardless of configured ones
        self.gmetric["Q"]=gMetric("float", "%sQueries" % self.prefix, "count", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["QPS"]=gMetric("float", "%sQPS" % self.prefix, "qps", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["APT"]=gMetric("float", "%sAvg_Processing_Time" % self.prefix, "seconds", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["MAX"]=gMetric("float", "%sMax_Processing_Time" % self.prefix, "seconds", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["TPT"]=gMetric("float", "%sTotal_Processing_Time" % self.prefix, "seconds", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["SLA"]=gMetric("float", "%sexceeding_SLA" % self.prefix, "percent", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["SLA_ct"]=gMetric("float", "%sexceeding_SLA_ct" % self.prefix, "percent", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["code_version"]=gMetric("string", "%sLW_Version" % self.prefix_root, "string", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["ignore"]=gMetric("float", "%signored" % self.prefix, "count", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["NOTIFY_TIME"]=gMetric("float", "%s%s" % (self.prefix_root,"LW_NotifyTime"), "seconds", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["LOG_TIME"]=gMetric("float", "%s%s" % (self.prefix_root,"LW_LogTime"), "seconds", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["NEW_METRICS"]=gMetric("float", "%s%s" % (self.prefix_root,"LW_NewMetrics"), "float", self.notify_schedule,self.graphite_server,self.debug)
        self.gmetric["TOTAL_METRICS"]=gMetric("float", "%s%s" % (self.prefix_root,"LW_TotalMetrics"), "float", self.notify_schedule,self.graphite_server,self.debug)
        # use this for sub-hourly and other odd log rotation
        self.curr_inode=None
        self.prime_metrics()
        self.initialize_counters()
        self.watch()
def readTestConfig(self):
sec="test"
if self.configfile == None:
return 0
try:
cp = ConfigParser.ConfigParser()
cp.read(self.configfile)
self.logformat=cp.get(sec, "log_name_format")
except:
pass
try:
self.notify_schedule=int(cp.get(sec, "notify_schedule"))
except:
pass
    def reReadConfig(self,signum,frame):
        # SIGHUP handler: re-parse the configuration file without restarting.
        self.readConfig()
def readConfig(self):
if self.debug:
print >> sys.stderr, "DEBUG: readconfig() called"
sec="logwatcher"
if self.configfile == None:
return 0
try:
cp = ConfigParser.ConfigParser()
cp.read(self.configfile)
self.logformat=cp.get(sec, "log_name_format")
if not self.graphite_server:
try:
self.use_graphite=cp.getboolean(sec, "use_graphite")
except:
pass
# "except -> pass" for those that come in via commandline
if self.pidfile==None:
try:
self.pidfile=cp.get(sec, "pidfile")
except:
pass
if not self.plugin_dir:
try:
self.plugin_list=cp.get(sec, "plugin_dir")
except:
pass
if self.plugin_dir:
if os.path.exists(self.plugin_dir):
sys.path.append(self.plugin_dir)
else:
print >> sys.stderr, "ERROR: %s does not exist" % self.plugin_dir
else:
for pp in self.plugin_paths:
if os.path.exists(pp):
sys.path.append(pp)
break
if not self.plugin_list:
try:
self.plugin_list=cp.get(sec, "plugins").split()
except:
pass
print >> sys.stderr, "Loading plugins: %s" % self.plugin_list
try:
for plugin in self.plugin_list:
print >> sys.stderr, "Loading plugin: %s" % (plugin)
mod = __import__(plugin) # import the module
cls = getattr(mod, plugin) # name the class so we can call it
self.plugins.append(cls(self.debug, self.getPluginConf(plugin))) # create an instance of the class
except Exception, e:
print >> sys.stderr, "Failed to load plugin: %s (%s)" % (Exception, e)
sys.exit(4) # should it be this serious?
import string
self.sla=float(cp.get(sec, "sla_ms"))/1000.0 # self.sla is seconds
try:
self.nologsleep=int(cp.get(sec, "nologsleep"))
except:
self.nologsleep=10
try:
self.notify_schedule=int(cp.get(sec, "notify_schedule"))
except:
self.notify_schedule=60
try:
self.debug=int(cp.get(sec, "debug"))
except:
pass
#print "DEBUG: %d" % self.notify_schedule
self.regex["processing_time"] = re.compile(cp.get(sec, "processing_time_regex"))
self.processing_time_units=cp.get(sec, "processing_time_units")
self.use_brand=0
try:
use_brand=int(cp.get(sec, "use_brand"))
if use_brand == 1:
self.use_brand=1
except:
pass
if self.use_brand == 1:
self.regex["brand"] = re.compile(cp.get(sec, "brand_regex"))
if self.distinguisher==None:
try:
self.distinguisher=cp.get(sec, "distinguisher")
except:
pass
# read in the metrics to prime
try:
self.metrics_prime_list=cp.get(sec, "metrics_prime").split(" ")
except:
self.metrics_prime_list=()
# read in the Count metrics, and optionally, the ratio metrics
self.metrics_count_list=cp.get(sec, "metrics_count").split(" ")
try:
self.metrics_ratio_list=cp.get(sec, "metrics_ratio").split(" ")
except:
self.metrics_ratio_list=()
for metric in self.metrics_count_list:
self.regex[metric]=re.compile(cp.get(sec, "metric_%s_regex" % metric))
# read in the Sum metrics; these can be ratio metrics as well!
try:
self.metrics_sum_list=cp.get(sec, "metrics_sum").split(" ")
to_remove=[]
for metric in self.metrics_sum_list:
try:
self.regex[metric]=re.compile(cp.get(sec, "metric_%s_regex" % metric))
except:
print "ERROR: Failed to find metric_%s_regex!" % metric
# remove it after we leave the loop
to_remove.append(metric)
for tr in to_remove:
self.metrics_sum_list.remove(tr)
except Exception, e:
print "ERROR: error reading metrics_sum: %s" % e
self.metrics_sum_list=()
# read in the calc metrics
try:
self.metrics_calc_list=cp.get(sec, "metrics_calc").split(" ")
for metric in self.metrics_calc_list:
try:
self.metric_calc_expr[metric]=cp.get(sec, "metric_%s_expression" % metric)
except:
print "ERROR: Failed to find metric_%s_regex!" % metric
self.metrics_calc_list.remove(metric)
except:
self.metrics_calc_list=()
# read in the distribution metrics
try:
self.metrics_dist_list=cp.get(sec, "metrics_dist").split(" ")
for metric in self.metrics_dist_list:
try:
self.metric_dist_bucketsize[metric]=int(cp.get(sec, "metric_%s_bucket_size" % metric))
self.metric_dist_bucketcount[metric]=int(cp.get(sec, "metric_%s_bucket_count" % metric))
self.regex[metric]=re.compile(cp.get(sec, "metric_%s_regex" % metric))
except Exception, e:
print "ERROR: Failed to set up metric_%s_regex! (%s)" % (metric, e)
self.metrics_dist_list.remove(metric)
except:
self.metrics_dist_list=()
# Get the ignore pattern. We'll completely ignore (but count) any matching lines.
try:
self.ignore_pattern=cp.get(sec, "ignore_pattern")
except:
self.ignore_pattern="^$" # safe to ignore
self.ignore=re.compile(self.ignore_pattern)
# this will be used to cleanse "found" metric names
try:
self.metric_cleaner=re.compile(cp.get(sec, "metric_cleaner"))
except:
self.metric_cleaner=re.compile("[/.:;\"\' $=]")
# STUB need some error handling for ratios that don't exist
except Exception, e:
print "failed to parse config file '%s'" % self.configfile
print "The following options are required:"
print " log_name_format"
print " sla_ms"
print " processing_time_regex"
print " use_brand"
print " brand_regex"
print " metrics_count"
print " metric_<metric_name>_regex for any metric listed in metrics_count"
print "Root error: %s" % e
sys.exit(1)
if self.testconfig:
self.readTestConfig()
def readGraphiteConf(self):
conf = "/etc/graphite.conf"
if self.debug:
print >> sys.stderr, "DEBUG: readGraphiteConf() called"
sec="graphite"
try:
cp = ConfigParser.ConfigParser()
cp.read(conf)
self.graphite_server=cp.get(sec, "server")
return self.graphite_server
except Exception, e:
print "Failed to read %s (%s)" % (conf, e)
return None
def getPluginConf(self, plugin):
if self.debug:
print >> sys.stderr, "DEBUG: getPluginConf(%s) called" % plugin
sec=plugin
if self.configfile == None:
return 0
try:
cp = ConfigParser.ConfigParser()
cp.read(self.configfile)
return dict(cp.items(plugin))
except:
return {}
def lockPID(self):
pid=self.getPID()
if pid == -1: # not using pidfile
return 1
elif pid == 0: # no pidfile
atexit.register(self.removePID)
f = open(self.pidfile, "w")
f.write("%d" % os.getpid())
f.close()
return 1
else:
print "PID is %d" % pid
return 0
if os.path.exists(self.pidfile):
return 0
def removePID(self):
try:
os.unlink(self.pidfile)
except:
print "unable to unlink pidfile!"
def getPID(self):
if not self.pidfile:
return -1
if os.path.exists(self.pidfile):
f = open(self.pidfile)
p = f.read()
f.close()
return int(p)
else:
return 0
def prime_metrics(self):
for pair in self.metrics_prime_list:
try:
pmetric,val = pair.split(":")
m=gMetric("float", "%s%s" % (self.prefix,pmetric), "prime", self.notify_schedule,self.graphite_server,self.debug)
m.send(float(val),1)
self.total_metric_count += 1
except Exception, e:
print >> sys.stderr, "Failed to send prime metric %s (%s)" % (pair, e)
def notifybrand(self, brand, seconds):
if self.graphite_server:
from graphitelib import gMetric
else:
from gmetriclib import gMetric
try:
if not self.gmetric_brands.has_key(brand):
self.gmetric_brands[brand]=gMetric("float", "%sQPS_%s" % (self.prefix,brand), "qps", self.notify_schedule,self.graphite_server,self.debug)
self.gmetric_brands[brand].send(float(self.brand_counts[brand]/seconds), 1)
self.total_metric_count += 1
except Exception, e:
print "couldn't notify for brand %s (%s)" % (brand, e)
    def notify(self, seconds):
        """Compute and send all metrics for the interval just ended, then reset
        all counters.  *seconds* is the length of the interval."""
        # backend selection mirrors __init__; sendMetrics batches for graphite
        if self.graphite_server:
            from graphitelib import gMetric
            from graphitelib import sendMetrics
        else:
            from gmetriclib import gMetric
        self.notify_time_start=time.time()
        #print time.strftime("%H:%M:%S")
        # processing-time derived metrics (zeros when no timed requests seen)
        if self.pt_requests > 0:
            self.gmetric["TPT"].send(self.processing_time, 1)
            #print "%.2f / %d" % (self.processing_time,self.pt_requests)
            self.gmetric["APT"].send(self.processing_time/self.pt_requests, 1)
            self.gmetric["MAX"].send(self.max_processing_time, 1)
            self.gmetric["SLA"].send(self.pt_requests_exceeding_sla*100.0/self.pt_requests, 1)
            self.gmetric["SLA_ct"].send(self.pt_requests_exceeding_sla, 1)
        else:
            self.gmetric["TPT"].send(0.0, 1)
            self.gmetric["APT"].send(0.0, 1)
            self.gmetric["MAX"].send(0.0, 1)
            self.gmetric["SLA"].send(0.0, 1)
            self.gmetric["SLA_ct"].send(0.0, 1)
        if seconds > 0:
            qps=float(self.requests/seconds)
        else:
            qps=0.0
        self.gmetric["Q"].send(self.requests, 1)
        self.gmetric["QPS"].send(qps, 1)
        #print self.processing_time
        self.total_metric_count += 7
        #print "covered %d, requests %d" % (self.covered,self.requests)
        # NOTE(review): the two coverage values below are computed but never
        # sent -- their gmetric sends are commented out further down.
        if self.requests > 0:
            coverage_per_query=self.covered*100.0/self.requests
        else:
            coverage_per_query=0.0
        #print "served %d, possible %d" % (self.inventory_served,self.inventory_possible)
        if self.inventory_possible > 0:
            coverage_per_ad_requested=self.inventory_served*100.0/self.inventory_possible
        else:
            coverage_per_ad_requested=0.0
        #self.gmetric_cpq.send(coverage_per_query, 1)
        #self.gmetric_cpar.send(coverage_per_ad_requested, 1)
        self.gmetric["code_version"].send("\"%s\"" % code_version, 0)
        self.gmetric["ignore"].send(self.ignored_count, 1)
        self.total_metric_count += 2
        for brand in self.brand_counts.keys():
            self.notifybrand(brand,seconds)
        # ratio metrics: each configured prefix turns matching sum metrics into
        # requests-relative ratios and matching count metrics into percents of
        # the prefix total
        for rmetric in self.metrics_ratio_list:
            tot=0
            regex=re.compile("^%s" % rmetric)
            for smetric in self.metric_sums.keys():
                rmetric_name="%s_ratio" % smetric
                if re.match(regex, smetric):
                    if self.requests != 0:
                        # we don't want to multiply by 100 for sum ratios
                        perc=float(self.metric_sums[smetric])/float(self.requests)
                    else:
                        perc=0.0
                    try:
                        self.gmetric[rmetric_name].send(perc,1)
                    except: #sketchy
                        self.gmetric[rmetric_name]=gMetric("float", "%s%s" % (self.prefix,rmetric_name), "percent", self.notify_schedule,self.graphite_server,self.debug)
                        self.gmetric[rmetric_name].send(perc,1)
                    self.total_metric_count += 1
            for cmetric in self.metric_counts.keys():
                if re.match(regex, cmetric):
                    tot=tot+self.metric_counts[cmetric]
            #print "TOTAL %d" % tot
            for cmetric in self.metric_counts.keys():
                rmetric_name="%s_ratio" % cmetric
                if re.match(regex, cmetric):
                    if tot!=0:
                        perc=float(self.metric_counts[cmetric])/float(tot) * 100
                    else:
                        perc=0.0
                    #print "%s %s %.2f" % (self.metric_counts[cmetric], cmetric, perc)
                    try:
                        self.gmetric[rmetric_name].send(perc,1)
                    except: #sketchy
                        self.gmetric[rmetric_name]=gMetric("float", "%s%s" % (self.prefix,rmetric_name), "percent", self.notify_schedule,self.graphite_server,self.debug)
                        self.gmetric[rmetric_name].send(perc,1)
                    self.total_metric_count += 1
        # send smetrics
        for smetric in self.metric_sums.keys():
            #print "DEBUG: sending %.2f" % self.metric_sums[smetric]
            try:
                self.gmetric[smetric].send(self.metric_sums[smetric],1)
            except: #sketchy
                self.gmetric[smetric]=gMetric("float", "%s%s" % (self.prefix,smetric), "sum", self.notify_schedule,self.graphite_server,self.debug)
                self.gmetric[smetric].send(self.metric_sums[smetric],1)
            self.total_metric_count += 1
        # send cmetrics
        for cmetric in self.metric_counts.keys():
            #print "DEBUG: sending %.2f" % self.metric_counts[cmetric]
            try:
                self.gmetric[cmetric].send(self.metric_counts[cmetric],1)
            except: #sketchy
                self.gmetric[cmetric]=gMetric("float", "%s%s" % (self.prefix,cmetric), "count", self.notify_schedule,self.graphite_server,self.debug)
                self.gmetric[cmetric].send(self.metric_counts[cmetric],1)
            self.total_metric_count += 1
        # send emetrics/calcs
        for emetric in self.metric_calcs.keys():
            try:
                cvalue=self.calculate(self.metric_calc_expr[emetric])
            except Exception, e:
                print Exception, e
                cvalue=0
            #print "DEBUG: emetric sending %.2f for %s" % (cvalue, emetric)
            try:
                self.gmetric[emetric].send(cvalue,1)
            except Exception, e: #sketchy, create then send instead of pre-initializing
                self.gmetric[emetric]=gMetric("float", "%s%s" % (self.prefix,emetric), "expression", self.notify_schedule,self.graphite_server,self.debug)
                self.gmetric[emetric].send(cvalue,1)
            self.total_metric_count += 1
        # send dmetrics
        for dmetric in self.metric_dists.keys():
            # NOTE(review): 'rmetric' here is left over from the ratio loop
            # above -- this raises NameError when metrics_ratio_list is empty,
            # and otherwise matches against an arbitrary (last) ratio prefix.
            regex=re.compile("^%s" % rmetric)
            # Let's do the ratio metrics in-line here
            do_ratio=False
            # NOTE(review): do_ratio is computed but never consulted below.
            if re.match(regex, dmetric):
                do_ratio=True
            last=0
            for bucket in range(self.metric_dist_bucketcount[dmetric]):
                current=last+self.metric_dist_bucketsize[dmetric]
                # first bucket
                if last == 0:
                    dmetric_b="%s_%d-%d" % (dmetric, 0, current-1)
                # last bucket
                elif bucket == self.metric_dist_bucketcount[dmetric]-1:
                    dmetric_b="%s_%d-%s" % (dmetric, last, "inf")
                # other buckets
                else:
                    dmetric_b="%s_%d-%d" % (dmetric, last, current-1)
                last=current
                #print dmetric_b,self.metric_dists[dmetric][bucket]
                #print "DEBUG: sending %.2f" % self.metric_counts[dmetric_b][bucket]
                try:
                    # NOTE(review): sends self.metric_counts[dmetric_b] here but
                    # the fallback below sends self.metric_dists[dmetric][bucket];
                    # the latter looks intended in both places -- confirm.
                    self.gmetric[dmetric_b].send(self.metric_counts[dmetric_b],1)
                except: #sketchy
                    self.gmetric[dmetric_b]=gMetric("float", "%s%s" % (self.prefix,dmetric_b), "count", self.notify_schedule,self.graphite_server,self.debug)
                    self.gmetric[dmetric_b].send(self.metric_dists[dmetric][bucket],1)
                self.total_metric_count += 1
                if self.requests != 0:
                    # we don't want to multiply by 100 for sum ratios
                    perc=float(self.metric_dists[dmetric][bucket])/float(self.requests) * 100
                    #perc=float(self.metric_counts[cmetric])/float(tot) * 100 # do we need to count matches (tot)?
                else:
                    perc=0.0
                try:
                    self.gmetric[dmetric_b+"_ratio"].send(perc,1)
                except: #sketchy
                    self.gmetric[dmetric_b+"_ratio"]=gMetric("float", "%s%s_ratio" % (self.prefix,dmetric_b), "percent", self.notify_schedule,self.graphite_server,self.debug)
                    self.gmetric[dmetric_b+"_ratio"].send(perc,1)
                self.total_metric_count += 1
        # send plugin metrics
        for p in self.plugins:
            try:
                pmetrics = p.get_metrics()
            except Exception, e:
                print >> sys.stderr, "WARNING: %s.get_metrics() failed. (%s)" % (p.__class__.__name__, e)
                continue
            for pmetric in pmetrics.keys():
                pmn = "plugins.%s.%s" % (p.__class__.__name__, pmetric)
                try:
                    self.gmetric[pmn].send(pmetrics[pmetric],1)
                except: #sketchy
                    self.gmetric[pmn]=gMetric("float", "%s%s" % (self.prefix,pmn), "count", self.notify_schedule,self.graphite_server,self.debug)
                    self.gmetric[pmn].send(pmetrics[pmetric],1)
                self.total_metric_count += 1
        self.gmetric["LOG_TIME"].send(self.log_time,1)
        self.gmetric["NEW_METRICS"].send(self.new_metric_count,1)
        self.total_metric_count += 3 # includes the next line
        self.gmetric["TOTAL_METRICS"].send(self.total_metric_count,1)
        # graphite mode: metrics were only queued; pop and send them as one batch
        if self.graphite_server:
            buffer = ""
            for m in self.gmetric:
                buffer += "%s\n" % self.gmetric[m].pop()
            for m in self.gmetric_brands:
                buffer += "%s\n" % self.gmetric_brands[m].pop()
            sendMetrics(buffer, self.graphite_server)
        # after sending batch, stop the timer
        self.notify_time=time.time() - self.notify_time_start
        # ...the one place where we changed the call for graphite
        if self.graphite_server:
            self.gmetric["NOTIFY_TIME"].send(self.notify_time,autocommit=True)
        else:
            self.gmetric["NOTIFY_TIME"].send(self.notify_time,1)
        if self.quit:
            print "Metrics complete."
            sys.exit(0)
        self.initialize_counters()
def initialize_counters(self):
# processing_time
self.processing_time=0
self.max_processing_time=0
self.requests=0
self.pt_requests=0
self.pt_requests_exceeding_sla=0
for brand in self.brand_counts.keys():
self.brand_counts[brand]=0
for cmetric in self.metric_counts.keys():
self.metric_counts[cmetric]=0
for smetric in self.metric_sums.keys():
self.metric_sums[smetric]=0
# this one is different, since the dict isn't created while reading the log
for emetric in self.metrics_calc_list:
self.metric_calcs[emetric]=0
for dmetric in self.metrics_dist_list:
self.metric_dists[dmetric]={}
for bucket in range(self.metric_dist_bucketcount[dmetric]):
self.metric_dists[dmetric][bucket]=0
# coverage
self.inventory_possible=0
self.covered=0
self.inventory_served=0
self.ignored_count=0
self.notify_time=0
self.log_time=0
self.new_metric_count=0
self.total_metric_count=0
def logbrand(self,brand,pt=None,coverate=None):
if self.brand_counts.has_key(brand):
self.brand_counts[brand]+=1
else:
self.brand_counts[brand]=1
if self.debug:
print >> sys.stderr, "DEBUG: Found new publisher: %s" % brand
self.new_metric_count+=1
    def openlog(self):
        """Close any open log and open self.log for reading; seeks to the end
        for the very first log unless --beginning was given."""
        try:
            if self.fd:
                self.fd.close()
                if self.debug:
                    print >> sys.stderr, "DEBUG: closing existing logfile"
        except:
            print "close() failed"
        try:
            self.fd=open(self.log, 'r')
            if self.debug:
                print >> sys.stderr, "DEBUG: opening logfile %s" % self.log
                print "DEBUG: log count = %d" % self.log_count
            # go to end of the log unless we override (w/beginning) or ARE in the first log
            # (i.e. seek-to-end only applies to the first log of this run;
            # rotated successors are read from the start)
            if ((not self.beginning) and (self.log_count == 0)):
                self.fd.seek(0,2)
                if self.debug:
                    print >> sys.stderr, "DEBUG: GOING TO THE END"
            self.log_count+=1
            self.curr_pos=self.prev_pos=self.fd.tell()
            # os.stat()[1] is st_ino: remember the inode so setlogname()
            # can detect in-place rotation under the same filename
            self.curr_inode=os.stat(self.log)[1]
            if self.debug:
                print >> sys.stderr, "DEBUG: current position is %d" % self.curr_pos
        except Exception, e:
            print "Error in openlog(): "+str(e)
            sys.exit(9)
def setlogname(self):
nowfile=time.strftime(self.logformat)
if nowfile == self.log:
#print "existing log"
# should return 1 if log filename changed OR if inode changed!
try:
filename_inode=os.stat(nowfile)[1]
if self.curr_inode != filename_inode:
return 1
except Exception, e:
# file probably renamed, but no new one yet
pass
return 0
if os.path.exists(nowfile):
if self.debug:
print >> sys.stderr, "DEBUG: FOUND A NEW LOGFILE, we should switch (after finishing)"
self.log=nowfile
return 1
return 0
"""
warning to sdterr
"""
def send_warning(self, msg):
print >> sys.stderr, "WARNING: %s" % msg
"""
this will replace variables with values in an expression
unknown items will be replaced with '_unknown_', forcing an exception at calculate() time
"""
def parse_expression(self, expression):
nexpression=""
try:
for bit in expression.split(" "):
try:
value=float(bit)
except:
ValueError, TypeError
if bit[:2] == "s/":
try:
value=self.metric_sums[bit[2:]]
except:
self.send_warning("in parse_expression() value for %s not found" % bit)
value=0
elif bit[:2] == "c/":
try:
value=self.metric_counts[bit[2:]]
except:
self.send_warning("in parse_expression() value for %s not found" % bit)
print self.metric_counts.keys()
value=0
# allow any object property to be used
elif bit[:2] == "i/":
try:
value=getattr(self,bit[2:])
except:
self.send_warning("in parse_expression() value for %s not found" % bit)
value=0
elif bit in ('/', '+', '-', '*'):
value=bit
else:
value="_unknown_"
nexpression="%s %s" % (nexpression, value)
except Exception, e:
print "Exception in parse_expression(): %s (%s)" % (Exception, e)
nexpression="-1"
return nexpression
"""
evaluate a parsed user-configured expression
"""
def calculate(self, expression):
try:
if self.debug:
print >> sys.stderr, "calculate(%s)" % self.parse_expression(expression)
value=eval(self.parse_expression(expression))
except ZeroDivisionError, e:
#print "Division by zero in calculate(%s)" % expression
value=0
except Exception, e:
value=-1
print >> sys.stderr, "Exception in calculate(): %s (expression: '%s')" % (e, expression)
return value
"""
watch the log file for new lines
"""
def watch(self):
# save_line is a buffer for saving a partial line at the end of a read
save_line=""
finish_counter = 0 # make sure we finished the previous file
finish_tries = 3 # make sure we finished the previous file
line = None
while 1:
now=time.time()
if self.last_time+self.notify_schedule<=now:
self.notify(now-self.last_time)
self.last_time=now
time.sleep(1)
if self.setlogname() == 1:
# we'll switch to the new log after trying the last log finish_tries times
finish_counter += 1
if self.debug:
print >> sys.stderr, "DEBUG: Last line was %s (try %d)" % (line, finish_counter)
if self.fd == None or finish_counter >= finish_tries:
self.openlog()
finish_counter = 0
elif (self.fd == None):
print "ERROR: logfile %s not opened, sleeping %ds" % (self.log, self.nologsleep)
time.sleep(self.nologsleep)
continue
notify_msg=""
found=0
# start the timer
self.log_time_start=time.time()
lines=self.fd.readlines()
if self.debug > 0:
print >> sys.stderr, "DEBUG: readlines() returned %d lines" % len(lines)
for line in lines:
# if we have a partial line from last time, use it
if len(save_line) > 0:
if self.debug:
print >> sys.stderr, "DEBUG: Reassembling Line: %s||+||%s" % (save_line, line)
line=save_line+line
save_line=""
# make sure it's a complete line before continuing
if line[-1:] == '\n':
# check for lines to ignore before doing anything else
try:
if self.ignore.search(line):
#print "Ignoring: %s" % line
self.ignored_count+=1
continue
except Exception, e:
print "Exception: %s" % e
# handle plugins
for p in self.plugins:
try:
p.process_line(line)
except Exception, e:
print >> sys.stderr, "Failed to call process_line on plugin %s (%s: %s)" % (p.__class__.__name__, Exception, e)
try:
self.requests+=1
#print self.requests
# we will also count lines that didn't match, for proper ratio
for cmetric in self.metrics_count_list:
m=self.regex[cmetric].search(line)
if m != None:
# to make this ganglia-safe, need to encode or otherwise
# clean the second argument
key="%s_%s" % (cmetric, self.metric_cleaner.sub("_", m.group(1)))
else:
key="%s_%s" % (cmetric, "NotSet")
try:
self.metric_counts[key]+=1
except Exception, e:
self.metric_counts[key]=1
if self.debug:
print >> sys.stderr, "DEBUG: Found new count metric: %s" % (key)
self.new_metric_count+=1
# this is just like processing_time, but without s/ms support
for smetric in self.metrics_sum_list:
m=self.regex[smetric].search(line)
if m != None:
value=float(m.group(1))
try:
self.metric_sums[smetric]+=value
except Exception, e:
self.metric_sums[smetric]=value
if self.debug:
print >> sys.stderr, "DEBUG: Found new sum metric: %s" % (smetric)
self.new_metric_count+=1
# search for distribution metrics
for dmetric in self.metrics_dist_list:
m=self.regex[dmetric].search(line)
if m != None:
value=int(m.group(1))
bucket=value / self.metric_dist_bucketsize[dmetric]
#print >> sys.stderr, "%d -> %d" % (value, bucket)
if bucket > self.metric_dist_bucketcount[dmetric]-1:
bucket=self.metric_dist_bucketcount[dmetric]-1
try:
self.metric_dists[dmetric][bucket]+=1
except Exception, e:
self.metric_dists[dmetric][bucket]=1
# processing_time
m=self.regex["processing_time"].search(line)
if m != None:
pt=float(m.group(1))
if self.processing_time_units == "ms":
pt = pt / 1000.0
elif self.processing_time_units == "us":
pt = pt / 1000.0 / 1000.0
self.processing_time+=pt
if pt > self.max_processing_time:
self.max_processing_time=pt
if pt > self.sla:
self.pt_requests_exceeding_sla+=1
self.pt_requests+=1
if self.use_brand:
# brand (how about pt/brand?)
m=self.regex["brand"].search(line)
if m != None:
brand=m.group(1)
else:
brand="NULL_brand"
self.logbrand(brand)
except Exception, e:
if self.debug > 0:
print "Continuing after exception [3]: %s" % e
continue
else:
# incomplete line: save
save_line=line
if self.debug:
print >> sys.stderr, "DEBUG: Incomplete Line, saving: %s" % (save_line)
self.prev_pos=self.curr_pos
# add to the timer
self.log_time+=time.time() - self.log_time_start
def handleSignal(signum, frame):
    # SIGINT handler (installed in __main__): announce and exit with the
    # signal number as the process status.
    print "\nLogWatcher killed"
    sys.exit(signum)
def procdaemonize (stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    """Classic UNIX double-fork daemonization: detach from the controlling
    terminal and redirect the standard streams to the given paths."""
    # Do first fork.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0) # Exit first parent.
    except OSError, e:
        sys.stderr.write ("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Decouple from parent environment.
    os.chdir("/")
    os.umask(0)
    os.setsid()
    # Do second fork.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0) # Exit second parent.
    except OSError, e:
        sys.stderr.write ("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Now I am a daemon!
    # Redirect standard file descriptors.
    # NB: file() is the Python 2 built-in; stderr is opened unbuffered (0).
    si = file(stdin, 'r')
    so = file(stdout, 'a+')
    se = file(stderr, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def main(argv):
import getopt, sys
try:
opts, args = getopt.getopt(argv, "VDdg:Gvhpc:i:qbt", ["verbose", "debug", "daemonize", "graphite-server", "use-graphite", "version", "help", "pidfile=", "config=", "distinguisher=","quit","beginning","testconfig"])
except:
usage()
pidfile=None
configfile=None
daemonize=0
debug=0
distinguisher=None
quit=False
beginning=False
testconfig=False
graphite_server=None
use_graphite=False
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(0)
if opt in ("-v", "--version"):
print code_version
sys.exit(0)
if opt in ("-p", "--pidfile"):
pidfile=arg
if opt in ("-i", "--distinguisher"):
distinguisher=arg
if opt in ("-c", "--config"):
configfile=arg
if opt in ("-g", "--graphite-server"):
graphite_server=arg
if opt in ("-G", "--use-graphite"):
use_graphite=True
if opt in ("-d", "--daemonize"):
daemonize=1
if opt in ("-D", "--debug"):
debug=1
if opt in ("-V", "--verbose"):
debug=2
if opt in ("-q", "--quit"):
quit=True
if opt in ("-b", "--beginning"):
beginning=True
if opt in ("-t", "--testconfig"):
testconfig=True
lw=LogWatcher(pidfile, daemonize, configfile, distinguisher, debug, quit, beginning,testconfig,graphite_server, use_graphite)
def usage():
    # Print the command-line help; callers are responsible for exiting.
    print "usage: %s [-h] [-v] [-D] [-V] [-d] [ -c configfile ] [-i <distinguisher>] [-p <pidfile>] [-q] [-b] [-t]" % sys.argv[0]
    print "  -h --help                Print this message"
    print "  -v --version             Print the version"
    print "  -D --debug               Don't send metrics, just print them"
    print "  -g --graphite-server <s> Use graphite, with server <s>"
    print "  -G --use-graphite        Use graphite, find server in /etc/graphite.conf"
    print "  -V --verbose             Print gmetric commands as they're sent. Disables -D"
    print "  -d --daemonize           Run in the background"
    print "  -c --config <file>       Use the given configuration file"
    print "  -i --distinguisher <dis> Use the given string in the metric names"
    print "  -p --pidfile <file>      Store the PID in the given file"
    print "  -q --quit                Quit after sending metrics (useful with -D)"
    print "  -b --beginning           Read the log from the beginning (useful with -q)"
    print "  -t --testconfig          Read overrides from the \"test\" section of the configuration file"
if __name__ == "__main__":
signal.signal(signal.SIGINT, handleSignal)
main(sys.argv[1:]) | PypiClean |
/Nuitka_winsvc-1.7.10-cp310-cp310-win_amd64.whl/nuitka/tools/testing/check_reference_counts/__main__.py | import os
import sys
from optparse import OptionParser
from nuitka.PythonVersions import isDebugPython
from nuitka.tools.testing.Common import checkReferenceCount, getTempDir
from nuitka.Tracing import my_print
from nuitka.utils.Execution import check_call
from nuitka.utils.Importing import importFileAsModule
def main():
    """Check a module's main() for reference-count stability, first running it
    as pure Python and then again after compiling it with Nuitka."""
    option_parser = OptionParser()
    option_parser.add_option(
        "--checked-module",
        action="store",
        dest="checked_module",
        help="""\
Module with main() function to be checked for reference count stability.""",
    )
    option_parser.add_option(
        "--explain",
        action="store_true",
        dest="explain",
        default=False,
        help="""\
Try to explain the differences by comparing object counts.""",
    )
    options, extra_args = option_parser.parse_args()
    # a single positional argument may stand in for --checked-module
    if extra_args and options.checked_module is None:
        options.checked_module = extra_args.pop()
    if extra_args and options.checked_module:
        option_parser.print_help()
        sys.exit("\nError, no positional argument allowed.")
    # First pass: the uncompiled module.
    module = importFileAsModule(options.checked_module)
    my_print("Using %s" % module.main, style="blue")
    checkReferenceCount(module.main, explain=options.explain)
    # Second pass: compile with Nuitka, import the result and check again.
    temp_dir = getTempDir()
    compile_command = [
        sys.executable,
        "-m",
        "nuitka",
        "--module",
        options.checked_module,
        "--output-dir=%s" % temp_dir,
    ]
    if isDebugPython():
        compile_command.append("--python-debug")
    check_call(compile_command)
    module_name = os.path.basename(options.checked_module).split(".")[0]
    sys.path.insert(0, temp_dir)
    module = __import__(module_name)
    my_print("Using %s" % module.main, style="blue")
    checkReferenceCount(module.main)
if __name__ == "__main__":
nuitka_package_dir = os.path.normpath(
os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".."))
)
# Unchanged, running from checkout, use the parent directory, the nuitka
# package ought be there.
sys.path.insert(0, nuitka_package_dir)
main() | PypiClean |
/NEMO_billing-2.6.7-py3-none-any.whl/NEMO_billing/rates/rates_class.py | from itertools import groupby
from typing import Dict, Iterable, List
from NEMO.models import Consumable, Tool
from NEMO.rates import Rates
from NEMO.utilities import distinct_qs_value_list
from django.conf import settings
from django.utils.formats import number_format
from NEMO_billing.invoices.models import InvoiceConfiguration
from NEMO_billing.invoices.processors import get_rate_with_currency
from NEMO_billing.rates.models import Rate, RateCategory
class DatabaseRates(Rates):
    """NEMO ``Rates`` implementation backed by the billing app's ``Rate`` model."""

    # When False, the per-tool rates table is rendered collapsed by default.
    expand_rates_table = False
    def __init__(self):
        # Currency symbol used in rate displays, plus the default invoice
        # configuration used when formatting amounts.
        self.currency = getattr(settings, "RATE_CURRENCY", "$")
        self.configuration = InvoiceConfiguration.first_or_default()
    def load_rates(self, force_reload=False):
        # Database-backed rates need no preloading; this override only
        # delegates to the base class and exists for interface clarity.
        super().load_rates(force_reload=force_reload)
def get_consumable_rates(self, consumables: List[Consumable]) -> Dict[str, str]:
return {
rate.consumable.name: self.consumable_rate_display(rate)
for rate in Rate.non_deleted().filter(consumable__in=consumables)
}
def get_consumable_rate(self, consumable: Consumable) -> str:
consumable_rate = Rate.non_deleted().get(consumable=consumable)
if consumable_rate.exists():
return self.consumable_rate_display(consumable_rate)
def get_tool_rate(self, tool: Tool) -> str:
all_tool_rates = Rate.non_deleted().filter(tool=tool).order_by("type", "category")
if not all_tool_rates:
return ""
list_by_type = groupby(all_tool_rates, key=lambda x: x.type)
rate_categories = distinct_qs_value_list(all_tool_rates, "category")
rate_categories = sorted([RateCategory.objects.get(id=cat_id).name for cat_id in rate_categories if cat_id])
html_rate = f'<div class="media"><a onclick="toggle_details(this)" class="pointer collapsed" data-toggle="collapse" data-target="#rates_details"><span class="glyphicon glyphicon-list-alt pull-left notification-icon primary-highlight"></span><span class="glyphicon pull-left chevron glyphicon-chevron-{"down" if self.get_expand_rates_table() else "right"}"></span></a>'
html_rate += f'<div class="media-body"><span class="media-heading">Rates</span><div id="rates_details" class="collapse {"in" if self.get_expand_rates_table() else ""}"><table class="table table-bordered table-hover thead-light" style="width: auto !important; margin-bottom: 0">'
if rate_categories:
html_rate += '<tr><th class="text-center"></th><th class="text-center">'
html_rate += '</th><th class="text-center">'.join(rate_categories)
html_rate += "</tr>"
for rate_type, tool_rates in list_by_type:
html_rate += (
f'<tr><th class="text-center" style="vertical-align: middle">{rate_type.get_type_display()}</th>'
)
if not rate_type.category_specific or not RateCategory.objects.exists():
html_rate += f'<td class="text-center" style="vertical-align: middle" colspan="{len(rate_categories)}">{self.tool_rate_display_with_details(tool_rates)}</td>'
else:
for rate_category in rate_categories:
tool_rate_category = all_tool_rates.filter(type=rate_type, category__name=rate_category)
html_rate += f'<td class="text-center" style="vertical-align: middle">{self.tool_rate_display_with_details(tool_rate_category)}</td>'
html_rate += "</tr></table></div></div></div>"
return html_rate
def tool_rate_display_with_details(self, rates: Iterable[Rate]):
return "<br>".join(
[
f"{get_rate_with_currency(self.configuration, rate.display_rate())}{' (' + rate.time.name + ')' if rate.time else ''}"
for rate in rates
]
)
def consumable_rate_display(self, rate: Rate) -> str:
return f"<b>{self.display_amount(rate.amount)}</b>"
def display_amount(self, amount):
return f"{self.currency}{number_format(amount, decimal_pos=2)}" | PypiClean |
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/pkg_resources/_vendor/pyparsing.py |
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
- construct character word-group expressions using the L{Word} class
- see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
- use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
- associate names with your parsed results using L{ParserElement.setResultsName}
- find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
- find more useful common expressions in the L{pyparsing_common} namespace class
"""
__version__ = "2.2.1"
__versionTime__ = "18 Sep 2018 00:49 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
# Python 3
from collections.abc import Iterable
from collections.abc import MutableMapping
except ImportError:
# Python 2.7
from collections import Iterable
from collections import MutableMapping
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]
# Python 2/3 compatibility shims: pick the right max-int, string type, and
# text-coercion helper (_ustr) for the running interpreter.
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
    _MAX_INT = sys.maxsize
    basestring = str
    unichr = chr
    # On Python 3 every str is already text, so plain str() suffices.
    _ustr = str
    # build list of single arg builtins, that can be used as parse actions
    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
    _MAX_INT = sys.maxint
    range = xrange
    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        """
        if isinstance(obj,unicode):
            return obj
        try:
            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
            # it won't break any existing code.
            return str(obj)
        except UnicodeEncodeError:
            # Else encode it
            # NOTE: uses the Regex class defined later in this module; safe
            # because this only runs at call time, not at import time.
            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
            xmlcharref = Regex(r'&#\d+;')
            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
            return xmlcharref.transformString(ret)
    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
    singleArgBuiltins = []
    import __builtin__
    for fname in "sum len sorted reversed list tuple set any all min max".split():
        try:
            singleArgBuiltins.append(getattr(__builtin__,fname))
        except AttributeError:
            continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
    # Empty namespace class: instances serve as simple attribute holders for
    # grouping related constants elsewhere in the module.
    pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions

    Carries the parse string (pstr), the failure location (loc), a message,
    and the parser element that raised it; line/column/line-text views are
    computed lazily via __getattr__.
    """
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        # Single-argument form: the argument is the message, not the string.
        if msg is None:
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem
        self.args = (pstr, loc, msg)
    @classmethod
    def _from_exception(cls, pe):
        """
        internal factory method to simplify creating one type of ParseException
        from another - avoids having __init__ signature conflicts among subclasses
        """
        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        # Computed on demand from loc/pstr using the module-level helpers.
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)
    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
                        ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            # Splice the marker into the line just before the failure column.
            line_str = "".join((line_str[:line_column],
                                markerString, line_str[line_column:]))
        return line_str.strip()
    def __dir__(self):
        # Advertise the lazily-computed attributes alongside the real ones.
        return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
    """
    Exception thrown when parse expressions don't match class;
    supported attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text

    Example::
        try:
            Word(nums).setName("integer").parseString("ABC")
        except ParseException as pe:
            print(pe)
            print("column: {}".format(pe.col))

    prints::
       Expected integer (at char 0), (line:1, col:1)
        column: 1
    """
    # No extra behavior: this subclass distinguishes ordinary, recoverable
    # match failures (caught during backtracking) from fatal ones.
    pass
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
       is found; stops all parsing immediately"""
    # Unlike ParseException, this is not swallowed by alternation/backtracking.
    pass
class ParseSyntaxException(ParseFatalException):
    """just like L{ParseFatalException}, but thrown internally when an
       L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
       immediately because an unbacktrackable syntax error has been found"""
    # Raised internally (not by user code) when a '-' error-stop is hit.
    pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """Exception raised by L{ParserElement.validate} when the grammar is
    found to be improperly recursive."""

    def __init__(self, parseElementList):
        # Trace of parse elements forming the recursive cycle.
        self.parseElementTrace = parseElementList

    def __str__(self):
        message = "RecursiveGrammarException: %s" % self.parseElementTrace
        return message
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""
Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,(int,slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return ( not not self.__toklist )
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def _iterkeys( self ):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues( self ):
return (self[k] for k in self._iterkeys())
def _iteritems( self ):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys (Python 3.x only)."""
values = _itervalues
"""Returns an iterator of all named result values (Python 3.x only)."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 3.x only)."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys( self ):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items( self ):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""
Removes and returns item at specified index (default=C{last}).
Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
argument or an integer argument, it will use C{list} semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use C{dict}
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in C{dict.pop()}.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k,v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified.
Similar to C{dict.get()}.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to C{list.insert()}.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend( self, itemseq ):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__( self, name ):
try:
return self[name]
except KeyError:
return ""
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a<0 else a+offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
def copy( self ):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
for i,res in enumerate(self.__toklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
    def __lookup(self,sub):
        # Reverse lookup: return the results name under which the given
        # ParseResults object is stored (matched by identity), or None if
        # it is not a named sub-result of this object.
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None
def getName(self):
r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent='', depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if full:
if self.haskeys():
items = sorted((str(k), v) for k,v in self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
MutableMapping.register(ParseResults)
def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # A location immediately after a newline is column 1.  Otherwise the
    # column is the distance from the preceding newline; rfind returning -1
    # (no newline before loc) makes the first line 1-based as well.
    if 0 < loc < len(strg) and strg[loc - 1] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # One more than the number of newlines strictly before loc.
    newlines_before = strg.count("\n", 0, loc)
    return newlines_before + 1
def line( loc, strg ):
    """Return the complete line of text from C{strg} that contains location
    C{loc}, with newlines treated as line separators.
    """
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    return strg[start:] if end < 0 else strg[start:end]
def _defaultStartDebugAction( instring, loc, expr ):
    # default debug hook: announce that a match attempt is starting,
    # with the (line, column) position of loc
    position = "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )
    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + position))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    # default debug hook: report a successful match and its tokens
    message = "Matched " + _ustr(expr) + " -> " + str(toks.asList())
    print (message)
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    # default debug hook: report an exception raised during a match attempt
    message = "Exception raised:" + _ustr(exc)
    print (message)
def nullDebugAction(*args):
    """Debug action that deliberately does nothing; install it to suppress
    debugging output during parsing."""
    return None
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
    """Wrap parse-action C{func} so it may be written with any 0..3 of the
    trailing C{(s, loc, toks)} arguments.  Arity is discovered empirically:
    the wrapper calls C{func} with the full argument list and, on a TypeError
    that originated at the call site itself (an arity mismatch rather than an
    error inside the user's code), retries with one fewer leading argument,
    up to C{maxargs} trims.  The discovered trim count is remembered.
    """
    if func in singleArgBuiltins:
        # builtins such as int()/float() accept only the token value
        return lambda s,l,t: func(t)
    # one-element lists act as mutable cells the nested wrapper can update
    # (py2-compatible substitute for 'nonlocal')
    limit = [0]
    foundArity = [False]
    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
    if system_version[:2] >= (3,5):
        def extract_stack(limit=0):
            # special handling for Python 3.5.0 - extra deep call stack by 1
            offset = -3 if system_version == (3,5,0) else -2
            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
            return [frame_summary[:2]]
        def extract_tb(tb, limit=0):
            frames = traceback.extract_tb(tb, limit=limit)
            frame_summary = frames[-1]
            return [frame_summary[:2]]
    else:
        extract_stack = traceback.extract_stack
        extract_tb = traceback.extract_tb
    # synthesize what would be returned by traceback.extract_stack at the call to
    # user's parse action 'func', so that we don't incur call penalty at parse time
    LINE_DIFF = 6
    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
    this_line = extract_stack(limit=2)[-1]
    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
    def wrapper(*args):
        while 1:
            try:
                ret = func(*args[limit[0]:])
                foundArity[0] = True
                return ret
            except TypeError:
                # re-raise TypeErrors if they did not come from our arity testing
                if foundArity[0]:
                    raise
                else:
                    try:
                        tb = sys.exc_info()[-1]
                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
                            raise
                    finally:
                        del tb
                if limit[0] <= maxargs:
                    limit[0] += 1
                    continue
                raise
    # copy func name to wrapper for sensible debug output
    func_name = "<parse action>"
    try:
        func_name = getattr(func, '__name__',
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    wrapper.__name__ = func_name
    return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
    @staticmethod
    def setDefaultWhitespaceChars( chars ):
        r"""
        Overrides the default whitespace chars
        Example::
            # default whitespace chars are space, <TAB> and newline
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
            # change to just treat newline as significant
            ParserElement.setDefaultWhitespaceChars(" \t")
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
        """
        # class-wide default; only affects elements created afterwards (or
        # copies made while copyDefaultWhiteChars is True)
        ParserElement.DEFAULT_WHITE_CHARS = chars
    @staticmethod
    def inlineLiteralsUsing(cls):
        """
        Set class to be used for inclusion of string literals into a parser.
        Example::
            # default literal class used is Literal
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
            # change to Suppress
            ParserElement.inlineLiteralsUsing(Suppress)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
        """
        # used by the operator overloads (__add__ etc.) to promote plain strings
        ParserElement._literalStringClass = cls
    def __init__( self, savelist=False ):
        # savelist: when True, matched tokens are grouped as a sub-list in the results
        self.parseAction = list()   # callables run on a successful match
        self.failAction = None      # optional callable run when matching fails
        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
        self.strRepr = None         # cached __str__ representation
        self.resultsName = None     # name under which tokens appear in results
        self.saveAsList = savelist
        self.skipWhitespace = True  # skip leading whitespace before matching
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False       # when True, input tabs are not expanded
        self.ignoreExprs = list()   # expressions (e.g. comments) skipped before matching
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None              # compiled regex, for regex-backed subclasses
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False  # run parse actions during lookahead/alternative testing?
def copy( self ):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
    def setBreak(self,breakFlag = True):
        """Method to invoke the Python pdb debugger when this element is
           about to be parsed. Set C{breakFlag} to True to enable, False to
           disable.
        """
        if breakFlag:
            # wrap the current _parse so pdb is entered on every match attempt
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod( instring, loc, doActions, callPreParse )
            # stash the original so setBreak(False) can restore it
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        else:
            if hasattr(self._parse,"_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
        return self
def setParseAction( self, *fns, **kwargs ):
"""
Define one or more actions to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction( self, *fns, **kwargs ):
"""
Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
def pa(s,l,t):
if not bool(_trim_arity(fn)(s,l,t)):
raise exc_type(s,l,msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail acton fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
    def _skipIgnorables( self, instring, loc ):
        # Consume any ignorable expressions (comments, etc.) at loc, repeating
        # until a full pass over ignoreExprs finds no further matches.
        exprsFound = True
        while exprsFound:
            exprsFound = False
            for e in self.ignoreExprs:
                try:
                    # consume as many consecutive matches of this expr as possible
                    while 1:
                        loc,dummy = e._parse( instring, loc )
                        exprsFound = True
                except ParseException:
                    pass
        return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # default implementation: match nothing, consume nothing;
        # subclasses override this with their actual matching logic
        return loc, []
    def postParse( self, instring, loc, tokenlist ):
        # hook for subclasses to massage tokens after a match; default is a no-op
        return tokenlist
    #~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        # Core match driver: preparse (skip ignorables/whitespace), run
        # parseImpl, then postParse and any parse actions.  The debug path
        # duplicates the normal path so the hot non-debug path stays lean.
        debugging = ( self.debug ) #and doActions )
        if debugging or self.failAction:
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    # ran off the end of the input - report as a normal parse failure
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException as err:
                #~ print ("Exception raised:", err)
                if self.debugActions[2]:
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            if self.mayIndexError or preloc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                # subclass guarantees no IndexError - skip the try/except overhead
                loc,tokens = self.parseImpl( instring, preloc, doActions )
        tokens = self.postParse( instring, loc, tokens )
        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    # each action may return replacement tokens; re-wrap when it does
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException as err:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )
        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
        return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
    class _UnboundedCache(object):
        # Memo cache with no size limit.  The accessors are closures over a
        # local dict (avoiding attribute lookups) and are bound as instance
        # methods at construction time.
        def __init__(self):
            cache = {}
            self.not_in_cache = not_in_cache = object()  # unique "missing" sentinel
            def get(self, key):
                return cache.get(key, not_in_cache)
            def set(self, key, value):
                cache[key] = value
            def clear(self):
                cache.clear()
            def cache_len(self):
                return len(cache)
            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)
            self.__len__ = types.MethodType(cache_len, self)
    if _OrderedDict is not None:
        class _FifoCache(object):
            # Bounded memo cache that evicts the oldest entries first,
            # backed by an OrderedDict (insertion order == eviction order).
            def __init__(self, size):
                self.not_in_cache = not_in_cache = object()  # unique "missing" sentinel
                cache = _OrderedDict()
                def get(self, key):
                    return cache.get(key, not_in_cache)
                def set(self, key, value):
                    cache[key] = value
                    # evict oldest insertions until within the size bound
                    while len(cache) > size:
                        try:
                            cache.popitem(False)
                        except KeyError:
                            pass
                def clear(self):
                    cache.clear()
                def cache_len(self):
                    return len(cache)
                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)
                self.__len__ = types.MethodType(cache_len, self)
    else:
        class _FifoCache(object):
            # Fallback for interpreters without OrderedDict: a plain dict plus
            # a deque recording insertion order for FIFO eviction.
            def __init__(self, size):
                self.not_in_cache = not_in_cache = object()
                cache = {}
                key_fifo = collections.deque([], size)
                def get(self, key):
                    return cache.get(key, not_in_cache)
                def set(self, key, value):
                    cache[key] = value
                    while len(key_fifo) > size:
                        cache.pop(key_fifo.popleft(), None)
                    key_fifo.append(key)
                def clear(self):
                    cache.clear()
                    key_fifo.clear()
                def cache_len(self):
                    return len(cache)
                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)
                self.__len__ = types.MethodType(cache_len, self)
    # argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail
    packrat_cache_lock = RLock()
    packrat_cache_stats = [0, 0]  # [hits, misses]
    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        HIT, MISS = 0, 1
        # the key covers everything that can influence the parse outcome
        lookup = (self, instring, loc, callPreParse, doActions)
        with ParserElement.packrat_cache_lock:
            cache = ParserElement.packrat_cache
            value = cache.get(lookup)
            if value is cache.not_in_cache:
                ParserElement.packrat_cache_stats[MISS] += 1
                try:
                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
                except ParseBaseException as pe:
                    # cache a copy of the exception, without the traceback
                    cache.set(lookup, pe.__class__(*pe.args))
                    raise
                else:
                    # cache a copy of the results so later caller-side mutation
                    # does not pollute the cached value
                    cache.set(lookup, (value[0], value[1].copy()))
                    return value
            else:
                ParserElement.packrat_cache_stats[HIT] += 1
                if isinstance(value, Exception):
                    # cached failure - re-raise it
                    raise value
                return (value[0], value[1].copy())
    # parsing is uncached by default; enablePackrat() rebinds this to _parseCache
    _parse = _parseNoCache
    @staticmethod
    def resetCache():
        # discard all memoized parse results and zero the hit/miss counters
        ParserElement.packrat_cache.clear()
        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
    _packratEnabled = False  # set True once enablePackrat() has been called
    @staticmethod
    def enablePackrat(cache_size_limit=128):
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done of
           both valid results and parsing exceptions.
           Parameters:
            - cache_size_limit - (default=C{128}) - if an integer value is provided
              will limit the size of the packrat cache; if None is passed, then
              the cache size will be unbounded; if 0 is passed, the cache will
              be effectively disabled.
           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method C{ParserElement.enablePackrat()}.  If
           your program uses C{psyco} to "compile as you go", you must call
           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
           Python will crash.  For best results, call C{enablePackrat()} immediately
           after importing pyparsing.
           Example::
               import pyparsing
               pyparsing.ParserElement.enablePackrat()
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            if cache_size_limit is None:
                ParserElement.packrat_cache = ParserElement._UnboundedCache()
            else:
                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
            # swap in the memoizing parse method for all parser elements
            ParserElement._parse = ParserElement._parseCache
    def parseString( self, instring, parseAll=False ):
        """
        Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.
        If you want the grammar to require that the entire input string be
        successfully parsed, then set C{parseAll} to True (equivalent to ending
        the grammar with C{L{StringEnd()}}).
        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the C{loc} argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
         - calling C{parseWithTabs} on your grammar before calling C{parseString}
           (see L{I{parseWithTabs}<parseWithTabs>})
         - define your parse action using the full C{(s,loc,toks)} signature, and
           reference the input string using the parse action's C{s} argument
         - explictly expand the tabs in your input string before calling
           C{parseString}
        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                # consume trailing ignorables/whitespace, then require end-of-string
                loc = self.preParse( instring, loc )
                se = Empty() + StringEnd()
                se._parse( instring, loc )
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
        else:
            return tokens
    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
        """
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
        C{overlap} is specified, then overlapping matches will be reported.
        Note that the start and end locations are reported relative to the string
        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
        strings with embedded tabs.
        Example::
            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
            print(source)
            for tokens,start,end in Word(alphas).scanString(source):
                print(' '*start + '^'*(end-start))
                print(' '*start + tokens[0])
        prints::
            sldjf123lsdjjkf345sldkjf879lkjsfd987
            ^^^^^
            sldjf
                    ^^^^^^^
                    lsdjjkf
                              ^^^^^^
                              sldkjf
                                         ^^^^^^
                                         lkjsfd
        """
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        # hoist bound-method lookups out of the scan loop
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    # no match here - advance one character past the preparse point
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        if overlap:
                            # resume scanning inside the match to report overlaps
                            nextloc = preparseFn( instring, loc )
                            if nextloc > loc:
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        # zero-width match - step forward to avoid an infinite loop
                        loc = preloc+1
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
    def transformString( self, instring ):
        """
        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
        be returned from a parse action.  To use C{transformString}, define a grammar and
        attach a parse action to it that modifies the returned token list.
        Invoking C{transformString()} on a target string will then scan for matches,
        and replace the matched text patterns according to the logic in the parse
        action.  C{transformString()} returns the resulting transformed string.
        Example::
            wd = Word(alphas)
            wd.setParseAction(lambda toks: toks[0].title())
            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
        Prints::
            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
        """
        out = []
        lastE = 0
        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
        # keep string locs straight between transformString and scanString
        self.keepTabs = True
        try:
            for t,s,e in self.scanString( instring ):
                # copy the unmatched text, then the (possibly replaced) tokens
                out.append( instring[lastE:s] )
                if t:
                    if isinstance(t,ParseResults):
                        out += t.asList()
                    elif isinstance(t,list):
                        out += t
                    else:
                        out.append(t)
                lastE = e
            out.append(instring[lastE:])
            out = [o for o in out if o]
            return "".join(map(_ustr,_flatten(out)))
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
and the optional C{includeSeparators} argument (default=C{False}), if the separating
matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t,s,e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other ):
"""
Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
converts them to L{Literal}s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
Prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""
Implementation of + operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns C{L{And}} with error stop
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return self + And._ErrorStop() + other
def __rsub__(self, other ):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""
Implementation of * operator, allows use of C{expr * 3} in place of
C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""
Implementation of | operator - returns C{L{MatchFirst}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""
Implementation of | operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""
Implementation of ^ operator - returns C{L{Or}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other):
    """
    Implementation of & operator - returns C{L{Each}} of this element and C{other}.
    A bare string operand is promoted to a parser element first; unsupported
    operand types produce a C{SyntaxWarning} and C{None}.
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return Each([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rand__(self, other):
    """
    Implementation of & operator when left operand is not a C{L{ParserElement}};
    promotes a bare string, then retries the forward operator.
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return other & self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __invert__(self):
    """Implementation of ~ operator - returns C{L{NotAny}} wrapping this element."""
    return NotAny(self)
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress(self):
    """
    Wrap this C{ParserElement} in a C{Suppress} so its matched text is omitted
    from results; useful to keep punctuation out of returned output.
    """
    return Suppress(self)
def leaveWhitespace(self):
    """
    Disable skipping of whitespace before matching this element's pattern.
    Normally only used internally by pyparsing, but may be needed by some
    whitespace-sensitive grammars.  Returns self to allow chaining.
    """
    self.skipWhitespace = False
    return self
def setWhitespaceChars(self, chars):
    """Override the default set of whitespace characters skipped before matching.  Returns self."""
    self.whiteChars = chars
    self.copyDefaultWhiteChars = False
    self.skipWhitespace = True
    return self
def parseWithTabs(self):
    """
    Keep literal C{<TAB>} characters instead of expanding them to spaces before
    parsing.  Must be called before C{parseString} when the grammar matches tabs.
    Returns self to allow chaining.
    """
    self.keepTabs = True
    return self
def ignore(self, other):
    """
    Define an expression (e.g. comments) to be skipped while pattern matching;
    may be called repeatedly to register multiple ignorable patterns.
    Example::
        patt = OneOrMore(Word(alphas))
        patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
        patt.ignore(cStyleComment)
        patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
    """
    if isinstance(other, basestring):
        other = Suppress(other)
    if not isinstance(other, Suppress):
        # wrap a copy so the caller's element is unaffected
        self.ignoreExprs.append(Suppress(other.copy()))
    elif other not in self.ignoreExprs:
        self.ignoreExprs.append(other)
    return self
def setDebugActions(self, startAction, successAction, exceptionAction):
    """
    Enable debugging messages with custom callbacks for match attempt, match
    success, and match failure; passing a falsy value selects the default
    action for that slot.  Returns self.
    """
    self.debug = True
    self.debugActions = (
        startAction or _defaultStartDebugAction,
        successAction or _defaultSuccessDebugAction,
        exceptionAction or _defaultExceptionDebugAction,
    )
    return self
def setDebug(self, flag=True):
    """
    Enable (C{flag=True}) or disable (C{flag=False}) display of debugging
    messages while matching.  Enabling installs the default debug actions;
    use L{setDebugActions} for custom ones.  Returns self.
    Example::
        wd = Word(alphas).setName("alphaword")
        wd.setDebug()
        OneOrMore(wd | Word(nums)).parseString("abc 123 xyz 890")
    prints a C{"Match <exprname> at loc <n>(<line>,<col>)"} line before each
    attempt, then C{"Matched"} or C{"Exception raised"}.  Use L{setName} to
    give expressions readable names in this output.
    """
    if not flag:
        self.debug = False
    else:
        self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)
    return self
def __str__( self ):
return self.name
def __repr__(self):
    """Repr mirrors str(), via the module's unicode-safe helper."""
    return _ustr(self)
def streamline(self):
    """Mark this element as streamlined and drop its cached string form.  Returns self."""
    self.strRepr = None
    self.streamlined = True
    return self
def checkRecursion(self, parseElementList):
    """Base implementation: a leaf element cannot recurse, so there is nothing to check."""
    return None
def validate(self, validateTrace=None):
    """
    Check defined expressions for valid structure, check for infinite recursive definitions.

    :param validateTrace: optional list of already-visited elements (unused at
        this base level).  The default was changed from a mutable ``[]`` literal
        to ``None`` to avoid the shared-mutable-default pitfall; behavior is
        unchanged because the argument is never read here.
    """
    if validateTrace is None:
        validateTrace = []
    self.checkRecursion([])
def parseFile(self, file_or_filename, parseAll=False):
    """
    Execute the parse expression on the given file or filename.  A filename
    argument is opened, fully read, and closed before parsing begins.
    """
    try:
        file_contents = file_or_filename.read()
    except AttributeError:
        # not file-like: treat it as a path
        with open(file_or_filename, "r") as f:
            file_contents = f.read()
    try:
        return self.parseString(file_contents, parseAll)
    except ParseBaseException as exc:
        if not ParserElement.verbose_stacktrace:
            # re-raise from here to trim pyparsing's internal frames
            raise exc
        raise
def __eq__(self, other):
    """
    Equality: two ParserElements compare equal if they are the same object or
    have identical attribute dicts; a string compares equal if this expression
    matches it completely (via L{matches}).
    """
    if isinstance(other, ParserElement):
        return self is other or vars(self) == vars(other)
    if isinstance(other, basestring):
        return self.matches(other)
    # Fix: the original returned ``super(ParserElement,self)==other``, which
    # compares the bare super proxy object itself to ``other`` -- effectively
    # always False and never giving ``other`` a chance to handle the
    # comparison.  Returning NotImplemented lets Python fall back to the
    # reflected operation / default identity comparison.
    return NotImplemented
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
def matches(self, testString, parseAll=True):
    """
    Quick boolean test of this parser against a test string: True if it
    parses, False otherwise.  Good for simple inline microtests while
    building up a larger parser.
    Parameters:
     - testString - string to test against this expression
     - parseAll - (default=C{True}) - flag passed through to C{L{parseString}}
    Example::
        expr = Word(nums)
        assert expr.matches("100")
    """
    try:
        self.parseString(_ustr(testString), parseAll=parseAll)
    except ParseBaseException:
        return False
    else:
        return True
def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
    """
    Execute the parse expression on a series of test strings, showing each
    test, the parsed results or where the parse failed. Quick and easy way to
    run a parse expression against a list of sample strings.
    Parameters:
     - tests - a list of separate test strings, or a multiline string of test strings
     - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
     - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
          string; pass None to disable comment filtering
     - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
          if False, only dump nested list
     - printResults - (default=C{True}) prints test output to stdout
     - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
    Returns: a (success, results) tuple, where success indicates that all tests succeeded
    (or failed if C{failureTests} is True), and the results contain a list of lines of each
    test's output
    Example::
        number_expr = pyparsing_common.number.copy()
        result = number_expr.runTests('''
            # unsigned integer
            100
            # negative integer
            -100
            # float with scientific notation
            6.02e23
            # integer with scientific notation
            1e-12
            ''')
        print("Success" if result[0] else "Failed!")
        result = number_expr.runTests('''
            # stray character
            100Z
            # missing leading digit before '.'
            -.100
            # too many '.'
            3.14.159
            ''', failureTests=True)
        print("Success" if result[0] else "Failed!")
    prints::
        # unsigned integer
        100
        [100]
        # negative integer
        -100
        [-100]
        # float with scientific notation
        6.02e23
        [6.02e+23]
        # integer with scientific notation
        1e-12
        [1e-12]
        Success
        # stray character
        100Z
           ^
        FAIL: Expected end of text (at char 3), (line:1, col:4)
        # missing leading digit before '.'
        -.100
        ^
        FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
        # too many '.'
        3.14.159
            ^
        FAIL: Expected end of text (at char 4), (line:1, col:5)
        Success
    Each test string must be on a single line. If you want to test a string that spans multiple
    lines, create a test like this::
        expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")
    (Note that this is a raw string literal, you must include the leading 'r'.)
    """
    # a single multiline string is split into one test per line
    if isinstance(tests, basestring):
        tests = list(map(str.strip, tests.rstrip().splitlines()))
    if isinstance(comment, basestring):
        comment = Literal(comment)
    allResults = []
    comments = []
    success = True
    for t in tests:
        # precedence note: parsed as (A and B) or (C and D) -- collects comment
        # lines, and blank lines that immediately follow a comment run
        if comment is not None and comment.matches(t, False) or comments and not t:
            comments.append(t)
            continue
        if not t:
            continue
        # pending comments are echoed ahead of the test line itself
        out = ['\n'.join(comments), t]
        comments = []
        try:
            # allow literal "\n" in a single-line test to stand for a newline
            t = t.replace(r'\n','\n')
            result = self.parseString(t, parseAll=parseAll)
            out.append(result.dump(full=fullDump))
            # a clean parse counts as success only when failures were not expected
            success = success and not failureTests
        except ParseBaseException as pe:
            fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
            if '\n' in t:
                # multi-line input: show only the offending line, caret under the column
                out.append(line(pe.loc, t))
                out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
            else:
                out.append(' '*pe.loc + '^' + fatal)
            out.append("FAIL: " + str(pe))
            # a parse failure counts as success when failures were expected
            success = success and failureTests
            result = pe
        except Exception as exc:
            # non-parse exception (e.g. from a parse action)
            out.append("FAIL-EXCEPTION: " + str(exc))
            success = success and failureTests
            result = exc
        if printResults:
            if fullDump:
                out.append('')
            print('\n'.join(out))
        allResults.append((t, result))
    return success, allResults
class Token(ParserElement):
    """Abstract C{ParserElement} subclass serving as the base for atomic matching patterns."""
    def __init__(self):
        # tokens never accumulate sub-results, so savelist is always False
        super(Token, self).__init__(savelist=False)
class Empty(Token):
    """A token matching the empty string; it always succeeds without consuming input."""
    def __init__(self):
        super(Empty, self).__init__()
        self.mayIndexError = False
        self.mayReturnEmpty = True
        self.name = "Empty"
class NoMatch(Token):
    """A token that never matches; every parse attempt raises a C{ParseException}."""
    def __init__(self):
        super(NoMatch, self).__init__()
        self.errmsg = "Unmatchable token"
        self.name = "NoMatch"
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
    """
    Token to exactly match a specified string.
    Example::
        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah') # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
    For case-insensitive matching, use L{CaselessLiteral}.
    For keyword matching (force word break before and after the matched string),
    use L{Keyword} or L{CaselessKeyword}.
    """
    def __init__(self, matchString):
        super(Literal, self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        if self.matchLen:
            self.firstMatchChar = matchString[0]
        else:
            # degrade an empty literal to Empty rather than failing later
            warnings.warn("null string passed to Literal; use Empty() instead",
                          SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*; test the first
    # character before anything else, and skip startswith for 1-char literals.
    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] != self.firstMatchChar:
            raise ParseException(instring, loc, self.errmsg, self)
        if self.matchLen == 1 or instring.startswith(self.match, loc):
            return loc + self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
# Shorthand alias for Literal, and registration of Literal as the class used
# when a bare string is combined with a ParserElement (e.g. expr + ",").
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
    """
    Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
    - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
    - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
    - C{identChars} is a string of characters that would be valid identifier characters,
      defaulting to all alphanumerics + "_" and "$"
    - C{caseless} allows case-insensitive matching, default is C{False}.
    Example::
        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception
    For case-insensitive matching, use L{CaselessKeyword}.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__( self, matchString, identChars=None, caseless=False ):
        super(Keyword,self).__init__()
        if identChars is None:
            identChars = Keyword.DEFAULT_KEYWORD_CHARS
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # an empty keyword is almost certainly a mistake; warn but continue
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # caseless comparison is done with uppercased text on both sides
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.caseless:
            # match text caselessly, and require a non-identifier character (or
            # string boundary) both after and before the matched span
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            # exact match with the same word-boundary checks on both sides
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
    def copy(self):
        c = super(Keyword,self).copy()
        # NOTE(review): this resets any custom identChars given to __init__
        # (including the uppercased set built for caseless keywords) back to
        # the class default -- confirm this is intended before relying on it.
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    @staticmethod
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
    """
    Token to match a specified string while ignoring the case of letters.
    The parsed result is always the match string exactly as given to the
    constructor, NOT the casing found in the input text.
    Example::
        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
    (Contrast with example for L{CaselessKeyword}.)
    """
    def __init__(self, matchString):
        # the base class stores/compares the uppercased form...
        super(CaselessLiteral, self).__init__(matchString.upper())
        # ...while the original spelling is preserved for results
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc:loc + self.matchLen].upper() != self.match:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc + self.matchLen, self.returnString
class CaselessKeyword(Keyword):
    """
    Caseless version of L{Keyword}: matches the keyword ignoring case, with
    word-boundary checks on both sides of the match.
    Example::
        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
    (Contrast with example for L{CaselessLiteral}.)
    """
    def __init__( self, matchString, identChars=None ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # Fix: also require a non-keyword character (or string start) immediately
        # BEFORE the match, mirroring Keyword.parseImpl's caseless branch.  The
        # original tested only the trailing boundary, so scanning could report a
        # keyword match in the middle of an identifier (e.g. "cmd" inside "xcmd").
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
             (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
    """
    A variation on L{Literal} which matches "close" matches, that is,
    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
    - C{match_string} - string to be matched
    - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
    The results from a successful parse will contain the matched text from the input string and the following named results:
    - C{mismatches} - a list of the positions within the match_string where mismatches were found
    - C{original} - the original match_string used to compare against the input string
    If C{mismatches} is an empty list, then the match was an exact match.
    Example::
        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    """
    def __init__(self, match_string, maxMismatches=1):
        super(CloseMatch,self).__init__()
        self.name = match_string
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
        self.mayIndexError = False
        self.mayReturnEmpty = False

    def parseImpl( self, instring, loc, doActions=True ):
        start = loc
        instrlen = len(instring)
        maxloc = start + len(self.match_string)

        if maxloc <= instrlen:
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches

            # compare input against the pattern character by character, bailing
            # out as soon as the mismatch budget is exceeded
            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
                src,mat = s_m
                if src != mat:
                    mismatches.append(match_stringloc)
                    if len(mismatches) > maxMismatches:
                        break
            else:
                # loop ran to completion without exceeding maxMismatches.
                # Fix: the end location must be offset from `start`; the original
                # used the zip-relative index alone (`match_stringloc + 1`), which
                # returned a wrong loc and a truncated slice whenever start > 0.
                loc = start + match_stringloc + 1
                results = ParseResults([instring[start:loc]])
                results['original'] = self.match_string
                results['mismatches'] = mismatches
                return loc, results

        raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
    """
    Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.
    L{srange} is useful for defining custom character set strings for defining
    C{Word} expressions, using range notation from regular expression character sets.
    A common mistake is to use C{Word} to match a specific literal string, as in
    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    I{sets} of matchable characters. This expression would match "Add", "AAA",
    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
    To match an exact literal string, use L{Literal} or L{Keyword}.
    pyparsing includes helper strings for building Words:
    - L{alphas}
    - L{nums}
    - L{alphanums}
    - L{hexnums}
    - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
    - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
    - L{printables} (any non-whitespace character)
    Example::
        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())
        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums+'-')
        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")
        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
        super(Word,self).__init__()
        # excludeChars is subtracted from both character sets up front
        if excludeChars:
            initChars = ''.join(c for c in initChars if c not in excludeChars)
            if bodyChars:
                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # no body set given: body characters default to the initial set
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        # exact overrides both min and max
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # fast path: for the default-length case with no space characters, the
        # word can be matched with a single compiled regular expression
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.initCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except Exception:
                # fall back to the character-by-character matcher below
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        # regex fast path, when available
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                raise ParseException(instring, loc, self.errmsg, self)
            loc = result.end()
            return loc, result.group()
        if not(instring[ loc ] in self.initChars):
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        # consume body characters up to the allowed maximum
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        # with an explicit max, a following body character means the word is too long
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            throwException = True
        if self.asKeyword:
            # keyword mode: reject if the match abuts body characters on either side
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            # abbreviate long character sets in the display name
            def charsAsStr(s):
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    r"""
    Token for matching strings against a given regular expression, in any form
    accepted by the inbuilt Python re module.  Named groups (defined using
    C{(?P<name>...)}) are preserved as named parse results.
    Example::
        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    """
    compiledREtype = type(re.compile("[A-Z]"))

    def __init__(self, pattern, flags=0):
        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
        super(Regex, self).__init__()
        if isinstance(pattern, basestring):
            if not pattern:
                warnings.warn("null string passed to Regex; use Empty() instead",
                              SyntaxWarning, stacklevel=2)
            self.pattern = pattern
            self.flags = flags
            try:
                self.re = re.compile(self.pattern, self.flags)
            except sre_constants.error:
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                              SyntaxWarning, stacklevel=2)
                raise
            self.reString = self.pattern
        elif isinstance(pattern, Regex.compiledREtype):
            # pre-compiled expression: use it directly
            self.re = pattern
            self.flags = flags
            self.pattern = str(pattern)
            self.reString = self.pattern
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        result = self.re.match(instring, loc)
        if result is None:
            raise ParseException(instring, loc, self.errmsg, self)
        ret = ParseResults(result.group())
        # expose named groups as named results
        for group_name, group_val in result.groupdict().items():
            ret[group_name] = group_val
        return result.end(), ret

    def __str__(self):
        try:
            return super(Regex, self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    r"""
    Token for matching strings that are delimited by quoting characters.
    Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=C{None})
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
    Example::
        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    prints::
        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if not quoteChar:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if not endQuoteChar:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        self.convertWhitespaceEscapes = convertWhitespaceEscapes
        # build a single regex for the whole quoted string: opening quote, then
        # a repeated group of "any char that can't start the end quote" plus the
        # escape alternatives appended below, then the closing quote
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            # single-line mode additionally excludes newline characters
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # multi-char end quote: allow any proper prefix of the end quote
            # that is NOT followed by its next character
            self.pattern += (
                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            # used later by parseImpl to strip the escape char from results
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-character test before attempting the full regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped whitespace
                if '\\' in ret and self.convertWhitespaceEscapes:
                    ws_map = {
                        r'\t' : '\t',
                        r'\n' : '\n',
                        r'\f' : '\f',
                        r'\r' : '\r',
                    }
                    for wslit,wschar in ws_map.items():
                        ret = ret.replace(wslit, wschar)
                # replace escaped characters
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """
    Token for matching words composed of characters I{not} in a given set
    (whitespace IS matched unless listed in the exclusion set - see example).
    Defined with a string containing all disallowed characters, and optional
    minimum, maximum, and/or exact length.  The default C{min} is 1 (a minimum
    value < 1 is not valid); the defaults for C{max} and C{exact} are 0,
    meaning no maximum or exact length restriction.
    Example::
        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    prints::
        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    """
    def __init__(self, notChars, min=1, max=0, exact=0):
        super(CharsNotIn, self).__init__()
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        self.minLen = min
        self.maxLen = max if max > 0 else _MAX_INT
        # exact overrides both bounds
        if exact > 0:
            self.minLen = self.maxLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = (self.minLen == 0)
        self.mayIndexError = False

    def parseImpl(self, instring, loc, doActions=True):
        excluded = self.notChars
        if instring[loc] in excluded:
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        limit = min(start + self.maxLen, len(instring))
        while loc < limit and instring[loc] not in excluded:
            loc += 1
        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, instring[start:loc]

    def __str__(self):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            chars = self.notChars
            if len(chars) > 4:
                self.strRepr = "!W:(%s...)" % chars[:4]
            else:
                self.strRepr = "!W:(%s)" % chars
        return self.strRepr
class White(Token):
    """
    Special matching class for matching whitespace. Normally, whitespace is ignored
    by pyparsing grammars. This class is included when some whitespace structures
    are significant. Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class.
    """
    # printable labels for each recognized whitespace character
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White, self).__init__()
        self.matchWhite = ws
        # only skip whitespace characters that are NOT being matched explicitly
        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
        self.name = "".join(White.whiteStrs[c] for c in self.matchWhite)
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        self.minLen = min
        self.maxLen = max if max > 0 else _MAX_INT
        if exact > 0:
            # an exact length pins both bounds
            self.maxLen = exact
            self.minLen = exact
    def parseImpl( self, instring, loc, doActions=True ):
        # first character must be one of the matched whitespace chars
        if instring[loc] not in self.matchWhite:
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        limit = min( start + self.maxLen, len(instring) )
        while loc < limit and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, instring[start:loc]
class _PositionToken(Token):
    """Lightweight base class for zero-width matchers tied to a position in the input."""
    def __init__( self ):
        super(_PositionToken, self).__init__()
        # positional tokens match nothing and consume nothing
        self.name = self.__class__.__name__
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """
    Token to advance to a specific column of input text; useful for tabular report scraping.
    """
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        # 1-based target column number
        self.col = colno
    def preParse( self, instring, loc ):
        # Before normal parsing, skip any ignorable expressions and whitespace
        # until the target column is reached or input is exhausted.
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # Fail if the current position is already beyond the target column;
        # otherwise the text up to that column is returned as the match.
        thiscol = col( loc, instring )
        if thiscol > self.col:
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """
    Matches if current position is at the beginning of a line within the parse string
    Example::
        test = '''\
        AAA this line
        AAA and this line
        AAA but not this one
        B AAA and definitely not this one
        '''
        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
            print(t)
    Prints::
        ['AAA', ' this line']
        ['AAA', ' and this line']
    """
    def __init__( self ):
        super(LineStart, self).__init__()
        self.errmsg = "Expected start of line"
    def parseImpl( self, instring, loc, doActions=True ):
        # guard clause: anywhere but column 1 is a failure
        if col(loc, instring) != 1:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class LineEnd(_PositionToken):
    """
    Matches if current position is at the end of a line within the parse string
    """
    def __init__( self ):
        super(LineEnd, self).__init__()
        # skip all default whitespace except newline, which is what we match
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
    def parseImpl( self, instring, loc, doActions=True ):
        # at the very end of the string: match, advancing past the end
        if loc == len(instring):
            return loc + 1, []
        # before the end: only a literal newline matches (and is consumed)
        if loc < len(instring) and instring[loc] == "\n":
            return loc + 1, "\n"
        raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
    """
    Matches if current position is at the beginning of the parse string
    """
    def __init__( self ):
        super(StringStart, self).__init__()
        self.errmsg = "Expected start of text"
    def parseImpl( self, instring, loc, doActions=True ):
        # succeed at position 0, or when everything before loc is just
        # whitespace/ignorables that preParse would have skipped anyway
        if loc != 0 and loc != self.preParse( instring, 0 ):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class StringEnd(_PositionToken):
    """
    Matches if current position is at the end of the parse string
    """
    def __init__( self ):
        super(StringEnd, self).__init__()
        self.errmsg = "Expected end of text"
    def parseImpl( self, instring, loc, doActions=True ):
        # anything left to read means we are not at the end
        if loc < len(instring):
            raise ParseException(instring, loc, self.errmsg, self)
        # exactly at the end: match and advance past the end
        if loc == len(instring):
            return loc + 1, []
        # already past the end (e.g. after LineEnd): match without advancing
        return loc, []
class WordStart(_PositionToken):
    """
    Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart, self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # position 0 always qualifies as a word start
        if loc == 0:
            return loc, []
        # fail if the previous character is a word character...
        if instring[loc-1] in self.wordChars:
            raise ParseException(instring, loc, self.errmsg, self)
        # ...or if the current character is not one
        if instring[loc] not in self.wordChars:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class WordEnd(_PositionToken):
    """
    Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd, self).__init__()
        self.wordChars = set(wordChars)
        # trailing whitespace is significant for word-end detection
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        # an empty string, or a position at/past the end, always qualifies
        if instrlen == 0 or loc >= instrlen:
            return loc, []
        # fail if the current character is a word character...
        if instring[loc] in self.wordChars:
            raise ParseException(instring, loc, self.errmsg, self)
        # ...or if the preceding character is not one
        # NOTE(review): at loc == 0 this inspects instring[-1] (the last char),
        # matching the historical behavior - confirm callers never hit this.
        if instring[loc-1] not in self.wordChars:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class ParseExpression(ParserElement):
    """
    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
    """
    def __init__( self, exprs, savelist = False ):
        # Normalize the constructor argument: a generator is drained first,
        # then a bare string becomes a single literal, an iterable is copied
        # (wrapping all-string sequences with the literal class), and anything
        # else is wrapped in a one-element list.
        super(ParseExpression,self).__init__(savelist)
        if isinstance( exprs, _generatorType ):
            exprs = list(exprs)
        if isinstance( exprs, basestring ):
            self.exprs = [ ParserElement._literalStringClass( exprs ) ]
        elif isinstance( exprs, Iterable ):
            exprs = list(exprs)
            # if sequence of strings provided, wrap with Literal
            if all(isinstance(expr, basestring) for expr in exprs):
                exprs = map(ParserElement._literalStringClass, exprs)
            self.exprs = list(exprs)
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        # invalidate the cached string repr when the expression list changes
        self.exprs.append( other )
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
        all contained expressions."""
        self.skipWhitespace = False
        # copy contained exprs so the originals are not mutated
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        # Suppress-wrapped ignorables are deduplicated; in both branches the
        # newly-registered ignore expression is propagated to all children.
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
            # the second operand may also be a collapsible nested expression
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
        self.errmsg = "Expected " + _ustr(self)
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # recursively validate children, then check for left recursion
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
    def copy(self):
        # deep-ish copy: contained expressions are copied as well
        ret = super(ParseExpression,self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret
class And(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    May also be constructed using the C{'-'} operator, which will suppress backtracking.
    Example::
        integer = Word(nums)
        name_expr = OneOrMore(Word(alphas))
        expr = And([integer("id"),name_expr("name"),integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    """
    class _ErrorStop(Empty):
        # sentinel inserted by the '-' operator; once passed, parse failures
        # become non-backtracking ParseSyntaxExceptions
        def __init__(self, *args, **kwargs):
            super(And._ErrorStop,self).__init__(*args, **kwargs)
            self.name = '-'
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # an And can only be empty if every element can be empty
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        # inherit whitespace handling from the first element
        self.setWhitespaceChars( self.exprs[0].whiteChars )
        self.skipWhitespace = self.exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # after an _ErrorStop, convert ordinary parse failures into
                # ParseSyntaxException so enclosing alternatives won't backtrack
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException as pe:
                    pe.__traceback__ = None
                    raise ParseSyntaxException._from_exception(pe)
                except IndexError:
                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            if exprtokens or exprtokens.haskeys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once an element must consume input, later elements cannot be
            # reached at the same position, so recursion checking stops here
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
class Or(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.
    Example::
        # construct Or using '^' operator
        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789"))
    prints::
        [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        if self.exprs:
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # First pass: tryParse every alternative (no actions fired), recording
        # how far each one reaches; keep the farthest failure for error reporting.
        maxExcLoc = -1
        maxException = None
        matches = []
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException as err:
                err.__traceback__ = None
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                # save match among all matches, to retry longest to shortest
                matches.append((loc2, e))
        if matches:
            # Second pass: re-parse (with actions) from longest match down,
            # returning the first that still succeeds.
            matches.sort(key=lambda x: -x[0])
            for _,e in matches:
                try:
                    return e._parse( instring, loc, doActions )
                except ParseException as err:
                    err.__traceback__ = None
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc
        if maxException is not None:
            maxException.msg = self.errmsg
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ixor__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.
    Example::
        # construct MatchFirst using '|' operator
        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        if self.exprs:
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # Try each alternative in listed order; return on the first success.
        # Track the failure that reached the farthest into the input so the
        # reported error is the most informative one.
        maxExcLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException as err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                maxException.msg = self.errmsg
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.
    Example::
        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")
        # use Each (using operator '&') to accept attributes in any order
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )
    prints::
        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE
        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50
        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        self.skipWhitespace = True
        # expression groups are computed lazily on first parse
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # Partition contained expressions once, by repetition semantics:
            # Optionals (and anything that may match empty), ZeroOrMore,
            # OneOrMore, and plain required expressions.
            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []
        # Round-robin: keep sweeping the remaining expressions, recording the
        # order in which they match, until a full pass matches nothing.
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    # map an inner Optional.expr back to its Optional wrapper
                    matchOrder.append(self.opt1map.get(id(e),e))
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            missing = ", ".join(_ustr(e) for e in tmpReqd)
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
        # Replay the discovered order with actions enabled to build the results.
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        finalResults = sum(resultlist, ParseResults([]))
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """
    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    """
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            # promote a bare string to a parser element; if the configured
            # literal class is not itself a Token, fall back to wrapping a Literal
            if issubclass(ParserElement._literalStringClass, Token):
                expr = ParserElement._literalStringClass(expr)
            else:
                expr = ParserElement._literalStringClass(Literal(expr))
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit parsing characteristics from the wrapped expression
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # NOTE(review): self.expr is copied before the None check below; if
        # expr were None this line would raise AttributeError - confirm this
        # is never called on an instance with no contained expression.
        self.expr = self.expr.copy()
        if self.expr is not None:
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        # Suppress-wrapped ignorables are deduplicated; in both branches the
        # newly-registered ignore expression is propagated to the child.
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # seeing ourselves again without consuming input means left recursion
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """
    Lookahead matching of the given parse expression. C{FollowedBy}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position. C{FollowedBy} always returns a null token list.
    Example::
        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
    prints::
        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    """
    def __init__( self, expr ):
        super(FollowedBy, self).__init__(expr)
        # a lookahead consumes no input, so it can always "match empty"
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # let a failed tryParse propagate its ParseException; on success,
        # report a zero-width match at the original location
        self.expr.tryParse( instring, loc )
        return loc, []
class NotAny(ParseElementEnhance):
    """
    Lookahead to disallow matching with the given parse expression. C{NotAny}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression does I{not} match at the current
    position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
    always returns a null token list. May be constructed using the '~' operator.
    Example::
    """
    def __init__( self, expr ):
        super(NotAny, self).__init__(expr)
        # do NOT use self.leaveWhitespace() here - that would propagate the
        # whitespace setting into the contained expression
        self.skipWhitespace = False
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        # invert the contained expression: its success is our failure
        if self.expr.canParseNext(instring, loc):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
    def __str__( self ):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class _MultipleMatch(ParseElementEnhance):
    """
    Abstract base for repetition expressions (L{OneOrMore}, L{ZeroOrMore});
    matches C{expr} one or more times, optionally ending when the C{stopOn}
    sentinel expression would match next.
    """
    def __init__( self, expr, stopOn=None):
        super(_MultipleMatch, self).__init__(expr)
        self.saveAsList = True
        ender = stopOn
        if isinstance(ender, basestring):
            ender = ParserElement._literalStringClass(ender)
        # negative lookahead that fails when the stop sentinel is next
        self.not_ender = ~ender if ender is not None else None
    def parseImpl( self, instring, loc, doActions=True ):
        # bind frequently-used bound methods to locals - this is a hot loop
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = self.not_ender is not None
        if check_ender:
            try_not_ender = self.not_ender.tryParse
        # must be at least one (but first see if we are the stopOn sentinel;
        # if so, fail)
        if check_ender:
            try_not_ender(instring, loc)
        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = (not not self.ignoreExprs)
            while 1:
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
                if tmptokens or tmptokens.haskeys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # repetition ends at the first failing match; keep what matched so far
            pass
        return loc, tokens
class OneOrMore(_MultipleMatch):
    """
    Repetition of one or more of the given expression.
    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)
    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
        text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
        # could also be written as
        (attr_expr * (1,)).parseString(text).pprint()
    """
    def __str__( self ):
        # an explicit name takes precedence over the generated repr
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{%s}..." % _ustr(self.expr)
        return self.strRepr
class ZeroOrMore(_MultipleMatch):
    """
    Optional repetition of zero or more of the given expression.
    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)
    Example: similar to L{OneOrMore}
    """
    def __init__( self, expr, stopOn=None):
        super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # delegate to the one-or-more logic; zero matches is also acceptable
        try:
            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
        except (ParseException, IndexError):
            return loc, []
    def __str__( self ):
        # an explicit name takes precedence over the generated repr
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[%s]..." % _ustr(self.expr)
        return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
# module-level sentinel that lets Optional distinguish "no default supplied"
# from a legitimate default value of None
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """
    Optional matching of the given expression.
    Parameters:
     - expr - expression that must match zero or more times
     - default (optional) - value to be returned if the optional expression is not found.
    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345
            # ZIP+4 form
            12101-0001
            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']
        # ZIP+4 form
        12101-0001
        ['12101-0001']
        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    """
    def __init__( self, expr, default=_optionalNotMatched ):
        super(Optional,self).__init__( expr, savelist=False )
        self.saveAsList = self.expr.saveAsList
        # _optionalNotMatched sentinel means "no default was supplied"
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # expression absent: substitute the default (if one was given),
            # propagating the contained expression's results name onto it
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """
    Token for skipping over all undefined text until the matched expression is found.
    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default=C{False}) if True, the target expression is also parsed
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default=C{None}) used to define grammars (typically quoted strings and
          comments) that might contain false matches to the target expression
     - failOn - (default=C{None}) define expressions that are not allowed to be
          included in the skipped test; if found before the target expression is found,
          the SkipTo is not a match
    Example::
        report = '''
            Outstanding Issues Report - 1 Jan 2000
               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quotedString)
        string_data.setParseAction(tokenMap(str.strip))
        ticket_expr = (integer("issue_num") + SEP
                      + string_data("sev") + SEP
                      + string_data("desc") + SEP
                      + integer("days_open"))
        for tkt in ticket_expr.searchString(report):
            print(tkt.dump())
    prints::
        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: 6
        - desc: Intermittent system crash
        - issue_num: 101
        - sev: Critical
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: 14
        - desc: Spelling error on Login ('log|n')
        - issue_num: 94
        - sev: Cosmetic
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: 47
        - desc: System slow when running too many reports
        - issue_num: 79
        - sev: Minor
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        if isinstance(failOn, basestring):
            self.failOn = ParserElement._literalStringClass(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        startloc = loc
        instrlen = len(instring)
        expr = self.expr
        # bind bound methods to locals - the scan below is a hot loop
        expr_parse = self.expr._parse
        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
        # scan forward one character at a time until the target expression matches
        tmploc = loc
        while tmploc <= instrlen:
            if self_failOn_canParseNext is not None:
                # break if failOn expression matches
                if self_failOn_canParseNext(instring, tmploc):
                    break
            if self_ignoreExpr_tryParse is not None:
                # advance past ignore expressions
                while 1:
                    try:
                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
                    except ParseBaseException:
                        break
            try:
                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
            except (ParseException, IndexError):
                # no match, advance loc in string
                tmploc += 1
            else:
                # matched skipto expr, done
                break
        else:
            # ran off the end of the input string without matching skipto expr, fail
            raise ParseException(instring, loc, self.errmsg, self)
        # build up return values
        loc = tmploc
        skiptext = instring[startloc:loc]
        skipresult = ParseResults(skiptext)
        if self.includeMatch:
            # re-parse the target (with actions this time) and append its tokens
            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
            skipresult += mat
        return loc, skipresult
class Forward(ParseElementEnhance):
    """
    Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.
    See L{ParseResults.pprint} for an example of a recursive parser created using
    C{Forward}.
    """
    def __init__( self, other=None ):
        # may be created empty; the real expression is attached later via <<
        super(Forward,self).__init__( other, savelist=False )

    def __lshift__( self, other ):
        # attach the deferred expression, accepting a plain string literal
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass(other)
        self.expr = other
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        # mirror the attached expression's whitespace/list/ignore behavior
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return self

    def __ilshift__(self, other):
        # fwd <<= expr is the recommended assignment form (avoids the
        # '|' precedence pitfall described in the class docstring)
        return self << other

    def leaveWhitespace( self ):
        self.skipWhitespace = False
        return self

    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self

    def validate( self, validateTrace=[] ):
        # guard against revisiting self in a recursive grammar
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        # NOTE: the early return below deliberately short-circuits the
        # recursion-safe rendering that follows; the remaining lines are
        # currently unreachable.
        return self.__class__.__name__ + ": ..."

        # stubbed out for now - creates awful memory and perf issues
        # (temporarily swaps in _ForwardNoRecurse to break the infinite
        # recursion that printing a self-referential grammar would cause)
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString

    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # still undefined - return a new Forward chained back to self,
            # so that a later definition of self defines the copy too
            ret = Forward()
            ret <<= self
            return ret
class _ForwardNoRecurse(Forward):
    # Temporary stand-in class used by Forward.__str__ to break the
    # infinite recursion that printing a self-referential grammar would cause.
    def __str__( self ):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract base class for expressions that post-process matched tokens."""

    def __init__(self, expr, savelist=False):
        # savelist is accepted for signature compatibility but unused;
        # each converter subclass decides its own list-saving policy
        super(TokenConverter, self).__init__(expr)
        self.saveAsList = False
class Combine(TokenConverter):
    """
    Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.
    Example::
        real = Word(nums) + '.' + Word(nums)
        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True

    def ignore( self, other ):
        # when adjacent, ignorables must not be skipped inside the match
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self

    def postParse( self, instring, loc, tokenlist ):
        # join all matched tokens into one string, preserving results names
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
        if self.resultsName and retToks.haskeys():
            return [ retToks ]
        else:
            return retToks
class Group(TokenConverter):
    """Converter that returns its matched tokens wrapped as a sub-list.

    Useful for preserving the grouping of tokens matched by repetition
    expressions such as ZeroOrMore and OneOrMore::

        ident = Word(alphas)
        num = Word(nums)
        term = ident | num
        func = ident + Optional(delimitedList(term))
        func.parseString("fn a,b,100")   # -> ['fn', 'a', 'b', '100']
        func = ident + Group(Optional(delimitedList(term)))
        func.parseString("fn a,b,100")   # -> ['fn', ['a', 'b', '100']]
    """

    def __init__(self, expr):
        super(Group, self).__init__(expr)
        self.saveAsList = True

    def postParse(self, instring, loc, tokenlist):
        # nest everything this expression matched in a single sub-list
        return [tokenlist]
class Dict(TokenConverter):
    """
    Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.
    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        # print attributes as plain groups
        print(OneOrMore(attr_expr).parseString(text).dump())
        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
        print(result.dump())
        # access named fields as dict entries, or output as dict
        print(result['shape'])
        print(result.asDict())
    prints::
        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    See more examples at L{ParseResults} of accessing fields by results name.
    """
    def __init__( self, expr ):
        super(Dict,self).__init__( expr )
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        # walk each sub-list of tokens, using its first token as the key
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # numeric keys are stored by their string form
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value - store an empty-string placeholder
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key-value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value - store everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    # single unnamed value - unwrap it
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter that discards the matched tokens from the parse results.

    Handy for delimiters that matter during parsing but are noise in the
    output::

        wd = Word(alphas)
        wd_list = wd + ZeroOrMore(Suppress(',') + wd)
        wd_list.parseString("a, b, c,d")   # -> ['a', 'b', 'c', 'd']

    (See also delimitedList.)
    """

    def postParse(self, instring, loc, tokenlist):
        # drop everything this expression matched
        return []

    def suppress(self):
        # already suppressed - nothing more to do
        return self
class OnlyOnce(object):
    """
    Wrapper for parse actions, to ensure they are only called once.
    A second invocation raises C{ParseException}; call C{reset()} to
    allow the wrapped action to fire again.
    """
    def __init__(self, methodCall):
        # normalize the callable to the standard (s,l,t) parse-action arity
        self.callable = _trim_arity(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        if not self.called:
            results = self.callable(s,l,t)
            self.called = True
            return results
        # previously raised with an empty message, which gave users no clue
        # why the parse failed - report the actual cause instead
        raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
    def reset(self):
        # re-arm the wrapper so the parse action may be invoked once more
        self.called = False
def traceParseAction(f):
    """
    Decorator for debugging parse actions.
    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
    Example::
        wd = Word(alphas)
        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))
        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    """
    # normalize the wrapped action to the standard (s,l,t) arity
    f = _trim_arity(f)
    def z(*paArgs):
        thisFunc = f.__name__
        # the last three args are always (instring, loc, tokens)
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            # bound method - prefix the trace name with the class name
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception as exc:
            # report and re-raise; the exception still propagates to the parser
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
    """Helper defining a list of one or more *expr* separated by *delim*.

    By default the delimiters are suppressed and the elements are returned
    as individual tokens, with intervening whitespace and comments allowed;
    pass ``combine=True`` to keep the delimiters and join the entire match
    into a single token string.

    Example::
        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    """
    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
    if combine:
        listExpr = Combine(expr + ZeroOrMore(delim + expr))
    else:
        listExpr = expr + ZeroOrMore(Suppress(delim) + expr)
    return listExpr.setName(dlName)
def countedArray( expr, intExpr=None ):
    """
    Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
    Example::
        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    """
    # arrayExpr is redefined at parse time, once the count is known
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        n = t[0]
        # n repetitions of expr, or an empty match when the count is 0
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        # return [] so the count token itself is suppressed from the results
        return []
    if intExpr is None:
        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
    else:
        # copy so the caller's expression is not mutated by the parse action
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    # callDuringTry=True: the count must be captured even in lookahead parses
    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do I{not} use with packrat parsing enabled.
    """
    # the Forward is redefined every time expr matches new tokens
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And(Literal(tt) for tt in tflat)
        else:
            # previous expression matched nothing - repeat must too
            rep << Empty()
    # callDuringTry=True so the repeater updates even in lookahead parses
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep
def matchPreviousExpr(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()
    # reparse with a copy of the same expression, then compare token values
    e2 = expr.copy()
    rep <<= e2
    def copyTokenToRepeater(s,l,t):
        # capture the tokens the first occurrence matched
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if  theseTokens != matchTokens:
                # token mismatch - reject the repeat
                raise ParseException("",0,"")
        # install the comparison as the repeater's parse action
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape characters that are special inside a regex [] range."""
    # escape the backslash itself first, so later escapes are not doubled
    for special in "\\^-]":
        s = s.replace(special, _bslash + special)
    # newline and tab become their printable escape sequences
    s = s.replace("\n", r"\n")
    s = s.replace("\t", r"\t")
    return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
    """
    Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.
    Parameters:
     - strs - a string of space-delimited literals, or a collection of string literals
     - caseless - (default=C{False}) - treat all literals as caseless
     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
        object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
        if creating a C{Regex} raises an exception)
    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    # select comparison/prefix-test helpers and literal class per caseless mode
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal
    symbols = []
    if isinstance(strs,basestring):
        symbols = strs.split()
    elif isinstance(strs, Iterable):
        symbols = list(strs)
    else:
        warnings.warn("Invalid argument to oneOf, expected string or iterable",
                SyntaxWarning, stacklevel=2)
    if not symbols:
        return NoMatch()
    # reorder symbols so that no symbol is "masked" by an earlier prefix of it
    # (ensures longest-first matching), and drop exact duplicates
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                # duplicate - remove the later occurrence
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                # cur is a prefix of other - move other in front of cur
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1
    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            # all symbols single characters? use a character class
            if len(symbols)==len("".join(symbols)):
                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
            else:
                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
        except Exception:
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)
    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf(key, value):
    """Helper to easily and clearly define a dictionary from key/value patterns.

    Wraps the supplied *key* and *value* expressions in the Dict, ZeroOrMore
    and Group combination in the proper order.  The key pattern may include
    delimiting markers or punctuation, provided they are suppressed, leaving
    the significant key text; the value pattern may include named results so
    the Dict results carry named token fields.

    Example::
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
        result = dictOf(attr_label, attr_value).parseString(text)
        print(result['shape'])   # -> SQUARE
        print(result.shape)      # object attribute access works too
        print(result.asDict())
    """
    entry = Group(key + value)
    return Dict(ZeroOrMore(entry))
def originalTextFor(expr, asString=True):
    """
    Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns astring containing the original parsed text.
    If the optional C{asString} argument is passed as C{False}, then the return value is a
    C{L{ParseResults}} containing any results names that were originally matched, and a
    single token containing the original matched text from the input string.  So if
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values.
    Example::
        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b","i"):
            opener,closer = makeHTMLTags(tag)
            patt = originalTextFor(opener + SkipTo(closer) + closer)
            print(patt.searchString(src)[0])
    prints::
        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    """
    # zero-width markers that record the parse location before and after expr
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        # slice the raw input between the recorded locations
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # pop the marker names so they do not leak into the results
            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
    matchExpr.setParseAction(extractText)
    # share the wrapped expression's ignorables so locations line up
    matchExpr.ignoreExprs = expr.ignoreExprs
    return matchExpr
def ungroup(expr):
    """Helper to undo pyparsing's default grouping of And expressions,
    even if all but one are non-empty.
    """
    # unwrap the single-element group, returning its first token
    return TokenConverter(expr).setParseAction(lambda tokens: tokens[0])
def locatedExpr(expr):
    """Helper decorating a matched token with its start/end input locations.

    Adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results

    Be careful if the input text contains <TAB> characters; you may want to
    call ParserElement.parseWithTabs.

    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)   # e.g. [[0, 'ljsdf', 5]]
    """
    # zero-width marker that records the current parse location
    marker = Empty().setParseAction(lambda s, locn, toks: locn)
    start = marker("locn_start")
    end = marker.copy().leaveWhitespace()("locn_end")
    return Group(start + expr("value") + end)
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
# internal building blocks for srange(): escaped punctuation, hex and octal
# character escapes, single characters, and character ranges as they appear
# inside a regex-style [] set
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
# full bracket expression: "[", optional negation, body of ranges/chars, "]"
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
    r"""
    Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be:
     - a single character
     - an escaped character with a leading backslash (such as C{\-} or C{\]})
     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
         (C{\0x##} is also supported for backwards compatibility)
     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
    """
    # expand a parsed range pair to all characters between its endpoints;
    # single characters pass through unchanged
    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
    try:
        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
    except Exception:
        # malformed range spec - return an empty character set
        return ""
def matchOnlyAtCol(n):
    """Helper for defining parse actions that require the match to start
    at a specific column of the input text.
    """
    def verifyCol(strg, locn, toks):
        # col() is 1-based; reject the match anywhere but column n
        if col(locn, strg) != n:
            raise ParseException(strg, locn, "matched token not at column %d" % n)
    return verifyCol
def replaceWith(replStr):
    """Parse action that replaces whatever matched with a fixed literal value.

    Especially useful when used with transformString().

    Example::
        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
        term = na | num
        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    """
    def _replacer(s, l, t):
        # ignore the matched tokens entirely; emit the fixed value
        return [replStr]
    return _replacer
def removeQuotes(s, l, t):
    """Parse action that strips the enclosing quotation marks from a parsed
    quoted string (i.e. drops the first and last characters of the token).

    Example::
        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    """
    quoted = t[0]
    return quoted[1:-1]
def tokenMap(func, *args):
    """Build a parse action that applies *func* to every parsed token.

    Any extra *args* are forwarded to *func* after each token, as in
    ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``, which
    converts the parsed data to an integer using base 16.

    Example::
        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).parseString("my kingdom for a horse")
        # -> ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
    """
    def mapped(s, l, t):
        # apply func element-wise across the token list
        return [func(tok, *args) for tok in t]
    # give the parse action a helpful name for parser diagnostics
    try:
        name = getattr(func, '__name__',
                       getattr(func, '__class__').__name__)
    except Exception:
        name = str(func)
    mapped.__name__ = name
    return mapped
# (deprecated) module-level parse actions, kept for backward compatibility
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
        resname = tagStr
        # XML tag names are case-sensitive; HTML tag names are not
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML mode: attribute values must be double-quoted
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML mode: values may be quoted or any run of printables except '>';
        # attribute names are lowercased and values are optional
        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # results names like "startDiv"/"endDiv" built from the tag name
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper returning (openTag, closeTag) expressions for an HTML tag.

    Matches tags in either upper or lower case, with namespaced attributes
    and quoted or unquoted attribute values.

    Example::
        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
        a, a_end = makeHTMLTags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end
        for link in link_expr.searchString(text):
            # attributes of the opening tag (like "href") are named results
            print(link.link_text, '->', link.href)
    """
    # HTML mode: caseless tag names, unquoted attribute values allowed
    return _makeTags(tagStr, False)
def makeXMLTags(tagStr):
    """Helper returning (openTag, closeTag) expressions for an XML tag.

    Unlike makeHTMLTags, matching is case-sensitive and attribute values
    must be quoted.  See makeHTMLTags for a usage example.
    """
    # XML mode: case-sensitive tag names, quoted attribute values required
    return _makeTags(tagStr, True)
def withAttribute(*args,**attrDict):
    """
    Helper to create a validating parse action to be used with start tags created
    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    C{<TD>} or C{<DIV>}.
    Call C{withAttribute} with a series of attribute names and values. Specify the list
    of filter attributes names and values as:
     - keyword arguments, as in C{(align="right")}, or
     - as an explicit dict with C{**} operator, when an attribute name is also a Python
          reserved word, as in C{**{"class":"Customer", "align":"right"}}
     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, you must use the second form.  Attribute
    names are matched insensitive to upper/lower case.
    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
    To verify that the attribute exists, but without specifying a value, pass
    C{withAttribute.ANY_VALUE} as the value.
    Example::
        html = '''
            <div>
            Some text
            <div type="grid">1 4 0 1 0</div>
            <div type="graph">1,3 2,3 1,1</div>
            <div>this has no type</div>
            </div>
        '''
        div,div_end = makeHTMLTags("div")
        # only match div tag having a type attribute with value "grid"
        div_grid = div().setParseAction(withAttribute(type="grid"))
        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)
        # construct a match with any div tag having a type attribute, regardless of the value
        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
        div_expr = div_any_type + SkipTo(div | div_end)("body")
        for div_header in div_expr.searchString(html):
            print(div_header.body)
    prints::
        1 4 0 1 0
        1 4 0 1 0
        1,3 2,3 1,1
    """
    # accept either positional (name, value) tuples or keyword arguments
    if args:
        attrs = args[:]
    else:
        attrs = attrDict.items()
    attrs = [(k,v) for k,v in attrs]
    def pa(s,l,tokens):
        # each required attribute must be present as a named result on the
        # start-tag tokens, and (unless ANY_VALUE) must have the given value
        for attrName,attrValue in attrs:
            if attrName not in tokens:
                raise ParseException(s,l,"no matching attribute " + attrName)
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                        (attrName, tokens[attrName], attrValue))
    return pa
# unique sentinel meaning "attribute must exist, any value accepted"
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
    """Simplified version of withAttribute for matching on a tag's class
    attribute - awkward via keyword arguments because ``class`` is a
    reserved word in Python.

    Example::
        html = '''
            <div>
            Some text
            <div class="grid">1 4 0 1 0</div>
            <div class="graph">1,3 2,3 1,1</div>
            <div>this &lt;div&gt; has no class</div>
            </div>
        '''
        div, div_end = makeHTMLTags("div")
        div_grid = div().setParseAction(withClass("grid"))
        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)   # -> 1 4 0 1 0
    """
    # prepend the namespace if one was given
    if namespace:
        classattr = "%s:class" % namespace
    else:
        classattr = "class"
    return withAttribute(**{classattr: classname})
# associativity constants used in infixNotation operator specifications;
# the values are unique sentinels compared by identity
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
    """
    Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary or
    binary, left- or right-associative. Parse actions can also be attached
    to operator expressions. The generated parser will also recognize the use
    of parentheses to override operator precedences (see example below).

    Note: if you define a deep operator list, you may see performance issues
    when using infixNotation. See L{ParserElement.enablePackrat} for a
    mechanism to potentially improve your parser performance.

    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
       expression grammar; each tuple is of the form
       (opExpr, numTerms, rightLeftAssoc, parseAction), where:
        - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
        - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
        - rightLeftAssoc is the indicator whether the operator is
          right or left associative, using the pyparsing-defined
          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
        - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted); if the parse action
          is passed a tuple or list of functions, this is equivalent to
          calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})

    Example::
        # simple example of four-function arithmetic with ints and variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier

        arith_expr = infixNotation(integer | varname,
            [
            ('-', 1, opAssoc.RIGHT),
            (oneOf('* /'), 2, opAssoc.LEFT),
            (oneOf('+ -'), 2, opAssoc.LEFT),
            ])

        arith_expr.runTests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', fullDump=False)
    prints::
        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
    """
    ret = Forward()
    # base operand: either the basic element, or a parenthesized full
    # expression (parentheses override all operator precedence)
    lastExpr = baseExpr | ( lpar + ret + rpar )
    for i,operDef in enumerate(opList):
        # pad the level definition with None so the trailing parse action
        # member may be omitted
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
        if arity == 3:
            # ternary operators are defined by a pair of separator expressions
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward().setName(termName)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    # right-associative: recurse into thisExpr on the right side
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            # attach the optional parse action(s) for this precedence level
            if isinstance(pa, (tuple, list)):
                matchExpr.setParseAction(*pa)
            else:
                matchExpr.setParseAction(pa)
        # each level matches an operator expression at this level, or falls
        # through to the next-higher-precedence expression
        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""

# predefined quoted-string expressions; each permits doubled-quote escapes
# ("" or '') and backslash escape sequences (including \\xNN hex escapes)
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """
    Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=C{None})
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})

    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.

    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.

    Example::
        data_type = oneOf("void int short long char float double")
        decl_data_type = Combine(data_type + Optional(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR,RPAR = map(Suppress, "()")

        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))

        c_function = (decl_data_type("type")
                      + ident("name")
                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
                      + code_body("body"))
        c_function.ignore(cStyleComment)

        source_code = '''
            int is_odd(int x) {
                return (x%2);
            }

            int dec_to_hex(char hchar) {
                if (hchar >= '0' && hchar <= '9') {
                    return (ord(hchar)-ord('0'));
                } else {
                    return (10+ord(hchar)-ord('A'));
                }
            }
        '''
        for func in c_function.searchString(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)
    prints::
        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # no content expression given - synthesize one that matches runs of
        # non-whitespace characters, excluding the delimiters themselves
        # (NOTE: basestring here is the py2/py3 compat alias defined earlier
        # in this module)
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # single-character delimiters can simply be excluded from the
                # content character set
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # multi-character delimiters must be excluded with negative
                # lookahead on each character of content
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # recursive grammar: a nested expression may itself appear between the
    # delimiters
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
    ret.setName('nested %s%s expression' % (opener,closer))
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """
    Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
       is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
       (multiple statementWithIndentedBlock expressions within a single grammar
       should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
       the current level; set to False for block of left-most statements
       (default=C{True})

    A valid block must contain at least one C{blockStatement}.

    Example::
        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''

        indentStack = [1]
        stmt = Forward()

        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group( funcDecl + func_body )

        rvalue = Forward()
        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << ( funcDef | assignment | identifier )

        module_body = OneOrMore(stmt)

        parseTree = module_body.parseString(data)
        parseTree.pprint()
    prints::
        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
    """
    def checkPeerIndent(s,l,t):
        # parse action: succeed only if the current column matches the
        # indentation level on top of the stack
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")

    def checkSubIndent(s,l,t):
        # parse action: succeed only if the current column is indented deeper
        # than the top of the stack; pushes the new level
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")

    def checkUnindent(s,l,t):
        # parse action: succeed only if the current column dedents to at most
        # the enclosing level; pops the current level off the stack
        if l >= len(s): return
        curCol = col(l,s)
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()

    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
    PEER = Empty().setParseAction(checkPeerIndent).setName('')
    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # allow backslash line-continuations inside block statements
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.setName('indented block')
# 8-bit Latin-1 letters and punctuation, usable as Word character sets
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")

# expressions and replacement map for matching common HTML tags and entities
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
    """Helper parser action to replace common HTML entities with their special characters"""
    # look up the replacement character for the named 'entity' group captured
    # by commonHTMLEntity
    return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
    """
    Here are some common low-level expressions that may be useful in jump-starting parser development:
     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
     - common L{programming identifiers<identifier>}
     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
     - L{UUID<uuid>}
     - L{comma-separated list<comma_separated_list>}
    Parse actions:
     - C{L{convertToInteger}}
     - C{L{convertToFloat}}
     - C{L{convertToDate}}
     - C{L{convertToDatetime}}
     - C{L{stripHTMLTags}}
     - C{L{upcaseTokens}}
     - C{L{downcaseTokens}}

    Example::
        pyparsing_common.number.runTests('''
            # any int or real number, returned as the appropriate type
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.fnumber.runTests('''
            # any int or real number, returned as float
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.hex_integer.runTests('''
            # hex numbers
            100
            FF
            ''')

        pyparsing_common.fraction.runTests('''
            # fractions
            1/2
            -3/4
            ''')

        pyparsing_common.mixed_integer.runTests('''
            # mixed fractions
            1
            1/2
            -3/4
            1-3/4
            ''')

        import uuid
        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
        pyparsing_common.uuid.runTests('''
            # uuid
            12345678-1234-5678-1234-567812345678
            ''')
    prints::
        # any int or real number, returned as the appropriate type
        100
        [100]

        -100
        [-100]

        +100
        [100]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # any int or real number, returned as float
        100
        [100.0]

        -100
        [-100.0]

        +100
        [100.0]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # hex numbers
        100
        [256]

        FF
        [255]

        # fractions
        1/2
        [0.5]

        -3/4
        [-0.75]

        # mixed fractions
        1
        [1]

        1/2
        [0.5]

        -3/4
        [-0.75]

        1-3/4
        [1.75]

        # uuid
        12345678-1234-5678-1234-567812345678
        [UUID('12345678-1234-5678-1234-567812345678')]
    """

    convertToInteger = tokenMap(int)
    """
    Parse action for converting parsed integers to Python int
    """

    convertToFloat = tokenMap(float)
    """
    Parse action for converting parsed numbers to Python float
    """

    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
    """expression that parses an unsigned integer, returns an int"""

    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
    """expression that parses a hexadecimal integer, returns an int"""

    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
    """expression that parses an integer with optional leading sign, returns an int"""

    # note: signed_integer() with no name argument returns a copy of the
    # expression, so the two operands get independent parse actions
    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
    """fractional expression of an integer divided by an integer, returns a float"""
    fraction.addParseAction(lambda t: t[0]/t[-1])

    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
    mixed_integer.addParseAction(sum)

    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
    """expression that parses a floating point number and returns a float"""

    sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
    """expression that parses a floating point number with optional scientific notation and returns a float"""

    # streamlining this expression makes the docs nicer-looking
    number = (sci_real | real | signed_integer).streamline()
    """any numeric expression, returns the corresponding Python type"""

    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
    """any int or real number, returned as float"""

    identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""

    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
    "IPv4 address (C{0.0.0.0 - 255.255.255.255})"

    # IPv6 building blocks: full (8 groups), short ("::"-compressed), and
    # mixed ("::ffff:" + IPv4) forms
    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
    # a "::"-compressed address must have fewer than 8 explicit groups
    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
    "IPv6 address (long, short, or mixed form)"

    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"

    @staticmethod
    def convertToDate(fmt="%Y-%m-%d"):
        """
        Helper to create a parse action for converting parsed date string to Python datetime.date

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})

        Example::
            date_expr = pyparsing_common.iso8601_date.copy()
            date_expr.setParseAction(pyparsing_common.convertToDate())
            print(date_expr.parseString("1999-12-31"))
        prints::
            [datetime.date(1999, 12, 31)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt).date()
            except ValueError as ve:
                # re-raise strptime failures as parse errors at this location
                raise ParseException(s, l, str(ve))
        return cvt_fn

    @staticmethod
    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
        """
        Helper to create a parse action for converting parsed datetime string to Python datetime.datetime

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})

        Example::
            dt_expr = pyparsing_common.iso8601_datetime.copy()
            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
        prints::
            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt)
            except ValueError as ve:
                # re-raise strptime failures as parse errors at this location
                raise ParseException(s, l, str(ve))
        return cvt_fn

    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
    "ISO8601 date (C{yyyy-mm-dd})"

    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
    "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"

    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
    "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"

    # suppresses any HTML tag (open or close) - used by stripHTMLTags
    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
    @staticmethod
    def stripHTMLTags(s, l, tokens):
        """
        Parse action to remove HTML tags from web page HTML source

        Example::
            # strip HTML links from normal text
            text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
            td,td_end = makeHTMLTags("TD")
            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end

            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
        """
        return pyparsing_common._html_stripper.transformString(tokens[0])

    # comma-list item variant that keeps trailing whitespace handling local
    # to the class-level comma_separated_list expression below
    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
                                      + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""

    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
    """Parse action to convert tokens to upper case."""

    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
    """Parse action to convert tokens to lower case."""
# Self-test / demo of the module when run as a script.  (Fix: removed stray
# "| PypiClean |" packaging residue that trailed the final runTests call and
# made the module unparseable.)
if __name__ == "__main__":

    # simple SQL SELECT grammar, used to demo the runTests method below
    selectToken = CaselessLiteral("select")
    fromToken = CaselessLiteral("from")

    ident = Word(alphas, alphanums + "_$")

    columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    columnNameList = Group(delimitedList(columnName)).setName("columns")
    columnSpec = ('*' | columnNameList)

    tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    tableNameList = Group(delimitedList(tableName)).setName("tables")

    simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")

    # demo runTests method, including embedded comments in test string
    simpleSQL.runTests("""
          # '*' as column list and dotted table name
          select * from SYS.XYZZY

          # caseless match on "SELECT", and casts back to "select"
          SELECT * from XYZZY, ABC

          # list of column names, and mixed case SELECT keyword
          Select AA,BB,CC from Sys.dual

          # multiple tables
          Select A, B, C from Sys.dual, Table2

          # invalid SELECT keyword - should fail
          Xelect A, B, C from Sys.dual

          # incomplete command - should fail
          Select

          # invalid column name - should fail
          Select ^^^ frox Sys.dual

          """)

    pyparsing_common.number.runTests("""
          100
          -100
          +100
          3.14159
          6.02e23
          1e-12
          """)

    # any int or real number, returned as float
    pyparsing_common.fnumber.runTests("""
          100
          -100
          +100
          3.14159
          6.02e23
          1e-12
          """)

    pyparsing_common.hex_integer.runTests("""
          100
          FF
          """)

    import uuid
    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
    pyparsing_common.uuid.runTests("""
          12345678-1234-5678-1234-567812345678
          """)
# ==== slim/nets/vgg.py (from 123_object_detection-0.1) ====
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  # Hoist the shared regularizer; all conv and fully-connected layers get
  # ReLU activations, l2 weight regularization, and zero-initialized biases.
  regularizer = slim.l2_regularizer(weight_decay)
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      activation_fn=tf.nn.relu,
      weights_regularizer=regularizer,
      biases_initializer=tf.zeros_initializer()):
    # Convolutions additionally default to SAME padding.
    with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
      return arg_sc
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          reuse=None,
          scope='vgg_a',
          fc_conv_padding='VALID',
          global_pool=False):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer is
      omitted and the input features to the logits layer are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional scope for the variables.
    fc_conv_padding: the type of padding to use for the fully connected layer
      that is implemented as a convolutional layer. Use 'SAME' padding if you
      are applying the network in a fully convolutional manner and want to
      get a prediction map downsampled by a factor of 32 as an output.
      Otherwise, the output prediction map will be (input / 32) - 6 in case of
      'VALID' padding.
    global_pool: Optional boolean flag. If True, the input to the classification
      layer is avgpooled to size 1x1, for any input size. (This is not part
      of the original VGG architecture.)

  Returns:
    net: the output of the logits layer (if num_classes is a non-zero integer),
      or the input to the logits layer (if num_classes is 0 or None).
    end_points: a dict of tensors with intermediate activations.
  """
  with tf.variable_scope(scope, 'vgg_a', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    # NOTE(review): unlike vgg_16/vgg_19, slim.fully_connected is not listed
    # here; the fc layers below are built with conv2d, so they are still
    # collected - confirm this was intentional.
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # VGG-A ("VGG-11"): conv repeats per block are 1, 1, 2, 2, 2,
      # each block followed by 2x2 max-pooling.
      net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')

      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if global_pool:
        net = tf.reduce_mean(
            input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
        end_points['global_pool'] = net
      if num_classes:
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        # Final 1x1 conv acts as the logits layer (no activation / norm).
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          scope='fc8')
        if spatial_squeeze:
          net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
vgg_a.default_image_size = 224
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           reuse=None,
           scope='vgg_16',
           fc_conv_padding='VALID',
           global_pool=False):
  """Oxford Net VGG 16-Layers version D Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer is
      omitted and the input features to the logits layer are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional scope for the variables.
    fc_conv_padding: the type of padding to use for the fully connected layer
      that is implemented as a convolutional layer. Use 'SAME' padding if you
      are applying the network in a fully convolutional manner and want to
      get a prediction map downsampled by a factor of 32 as an output.
      Otherwise, the output prediction map will be (input / 32) - 6 in case of
      'VALID' padding.
    global_pool: Optional boolean flag. If True, the input to the classification
      layer is avgpooled to size 1x1, for any input size. (This is not part
      of the original VGG architecture.)

  Returns:
    net: the output of the logits layer (if num_classes is a non-zero integer),
      or the input to the logits layer (if num_classes is 0 or None).
    end_points: a dict of tensors with intermediate activations.
  """
  with tf.variable_scope(
      scope, 'vgg_16', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # VGG-16 (configuration "D"): conv repeats per block are 2, 2, 3, 3, 3,
      # each block followed by 2x2 max-pooling.
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')

      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if global_pool:
        net = tf.reduce_mean(
            input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
        end_points['global_pool'] = net
      if num_classes:
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        # Final 1x1 conv acts as the logits layer (no activation / norm).
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          scope='fc8')
        if spatial_squeeze:
          net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
vgg_16.default_image_size = 224
def vgg_19(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           reuse=None,
           scope='vgg_19',
           fc_conv_padding='VALID',
           global_pool=False):
  """Oxford Net VGG 19-Layers version E Example.
  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer is
      omitted and the input features to the logits layer are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional scope for the variables.
    fc_conv_padding: the type of padding to use for the fully connected layer
      that is implemented as a convolutional layer. Use 'SAME' padding if you
      are applying the network in a fully convolutional manner and want to
      get a prediction map downsampled by a factor of 32 as an output.
      Otherwise, the output prediction map will be (input / 32) - 6 in case of
      'VALID' padding.
    global_pool: Optional boolean flag. If True, the input to the classification
      layer is avgpooled to size 1x1, for any input size. (This is not part
      of the original VGG architecture.)
  Returns:
    net: the output of the logits layer (if num_classes is a non-zero integer),
      or the non-dropped-out input to the logits layer (if num_classes is 0 or
      None).
    end_points: a dict of tensors with intermediate activations.
  """
  with tf.variable_scope(
      scope, 'vgg_19', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # Configuration "E": conv blocks of 2, 2, 4, 4, 4 3x3 convolutions
      # (vgg_16 uses 3 repeats in blocks 3-5); each max_pool2d halves the
      # spatial resolution.
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if global_pool:
        net = tf.reduce_mean(
            input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
        end_points['global_pool'] = net
      if num_classes:
        # Logits head: dropout + 1x1 conv with no activation/normalization.
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          scope='fc8')
        if spatial_squeeze:
          # Collapse the 1x1 spatial dims to [batch_size, num_classes].
          net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
# Default input resolution for VGG-19 (224x224, per the original paper setup).
vgg_19.default_image_size = 224
# Aliases: VGG configurations "D" and "E" from the original paper correspond
# to the 16- and 19-layer variants respectively.
vgg_d = vgg_16
vgg_e = vgg_19
/Gpxity-1.7.2-py3-none-any.whl/gpxity/util.py |
# Copyright (c) 2019 Wolfgang Rohdewald <wolfgang@rohdewald.de>
# See LICENSE for details.
"""This module defines some helpers."""
import os
import datetime
import time
import logging
import curses
from math import isclose
from gpxpy.geo import length as gpx_length
# Public API of this module; utc_to_local_delta is intentionally not exported.
__all__ = ['Duration', 'repr_timespan', 'uniq', 'remove_directory', 'is_gpxfile', 'collect_gpxfiles',
           'positions_equal', 'pairs', 'add_speed', 'utc_datetime', 'local_datetime', 'ColorStreamHandler']
class ColorStreamHandler(logging.Handler):
    """Logging handler that colorizes records by level using curses escapes."""
    def __init__(self):
        """Look up the terminal's color escape sequences per logging level.
        NOTE(review): ``curses.tigetstr`` only works after ``curses.setupterm``
        (or ``initscr``) has run -- presumably the application initializes
        curses before installing this handler; confirm against the UI code."""
        logging.Handler.__init__(self)
        # Get the foreground color attribute for this environment
        self.fcap = curses.tigetstr('setaf')
        # Get the normal attribute
        self.normal_color = curses.tigetstr('sgr0').decode("utf-8")
        # Get + Save the color sequences
        colors = (
            (logging.INFO, curses.COLOR_GREEN),
            (logging.DEBUG, curses.COLOR_BLUE),
            (logging.WARNING, curses.COLOR_YELLOW),
            (logging.ERROR, curses.COLOR_RED),
            (logging.CRITICAL, curses.COLOR_BLACK))
        self.colors = {x[0]: curses.tparm(self.fcap, x[1]).decode('utf-8') for x in colors} # noqa
    def color(self, msg, level):
        """Color the message according to logging level.
        Returns: The colored message, or *msg* unchanged when the level has no
        registered color or the escape-sequence lookup failed."""
        try:
            return self.colors[level] + msg + self.normal_color
        except Exception: # pylint: disable=broad-except
            return msg
    def emit(self, record):
        """Output the formatted, colored message.
        The trailing carriage return suggests the terminal is in a raw/cbreak
        mode where "\\n" alone does not return the cursor -- TODO confirm."""
        msg = self.format(record)
        msg = self.color(msg, record.levelno)
        print(msg + '\r')
class Duration:
    """Context manager that logs, for debugging, how long its body took."""

    # pylint: disable=too-few-public-methods

    def __init__(self, name):
        """Remember *name* and the moment this object was created."""
        self.name = name
        self.start_time = datetime.datetime.now()

    def __enter__(self):
        """Enter the timed section.

        Returns:
            self

        """
        return self

    def __exit__(self, exc_type, exc_value, trback):
        """Log the elapsed time together with start and end timestamps."""
        elapsed = datetime.datetime.now() - self.start_time
        logging.debug(
            '%s in %s %s-%s',
            elapsed, self.name, self.start_time, datetime.datetime.now())
def repr_timespan(start, end) ->str:
    """Format the timespan between *start* and *end* as hours and minutes.

    Returns:
        a string like ``25:05`` (hours may exceed 24, minutes zero-padded)

    """
    span = end - start
    total_minutes = span.days * 24 * 60 + span.seconds // 60
    hours, minutes = divmod(total_minutes, 60)
    return '{}:{:02}'.format(hours, minutes)
def uniq(lst):
    """Yield the elements of *lst* in original order, skipping duplicates.

    Hashable elements are tracked in a set for O(1) membership tests; the
    previous implementation used a list, making the whole pass O(n**2).
    Unhashable elements (e.g. lists) fall back to the old linear scan so the
    function keeps working for them.
    """
    seen_hashable = set()
    seen_unhashable = []
    for item in lst:
        try:
            if item in seen_hashable:
                continue
            seen_hashable.add(item)
        except TypeError:
            # item is unhashable -- fall back to (slow) equality scanning
            if item in seen_unhashable:
                continue
            seen_unhashable.append(item)
        yield item
def remove_directory(path):
    """Remove the empty directory *path*, logging its content when that fails.

    A missing directory is logged and re-raised; any other OSError is logged
    (together with the directory's remaining entries) but swallowed.
    """
    try:
        os.rmdir(path)
    except FileNotFoundError:
        logging.error("REMOVE_DIRECTORY %s: not found", path)
        raise
    except OSError as exc:
        logging.error('rmdir: errno: %s cannot remove directory: %s', exc, path)
        leftover = os.listdir(path) if os.path.exists(path) else []
        for entry in leftover:
            logging.error(' dir still has %s', entry)
def is_gpxfile(value):
    """Duck-type test: treat anything exposing ``id_in_backend`` as a GpxFile.

    Avoids importing GpxFile (and the import cycle that would cause).
    """
    try:
        value.id_in_backend
    except AttributeError:
        return False
    return True
def collect_gpxfiles(sources):
    """A copied list with gpxfiles combined from all sources, to be used in 'for'-loops.

    *sources* may be a single gpxfile, or an iterable mixing gpxfiles and
    backends (anything else iterable yielding gpxfiles).

    Returns:
        A list of gpxfiles

    """
    if is_gpxfile(sources):
        return [sources]
    collected = []
    for source in sources:
        if is_gpxfile(source):
            collected.append(source)
            continue
        logging.debug('')
        logging.debug('collecting gpxfiles from %s %s', source.account.backend, source)
        collected.extend(source)
    return collected
def positions_equal(pos1, pos2, digits=4):
    """Check if both points have the same position.

    Args:
        pos1: first point, must expose ``latitude`` and ``longitude``
        pos2: second point, must expose ``latitude`` and ``longitude``
        digits: Number of after comma digits to compare

    Returns:
        True if so

    """
    # Use an absolute tolerance of 10**-digits: the documented contract is a
    # comparison of decimal digits after the comma.  The previous rel_tol
    # scaled with coordinate magnitude and degenerated to exact equality for
    # coordinates near 0 (rel_tol * 0 == 0).
    tolerance = 1 / 10 ** digits
    return (isclose(pos1.longitude, pos2.longitude, abs_tol=tolerance)
            and isclose(pos1.latitude, pos2.latitude, abs_tol=tolerance))
def add_speed(points, window: int = 2):
    """Add speed to points in m/sec (stored as ``point.gpxity_speed``).

    It uses the last X points for computation: distance and time
    between [current_point - window] and [current_point].  Points that
    already carry a ``gpxity_speed`` (from an earlier call) are skipped,
    so the computation resumes at the first point without one.

    Args:
        window: The number of last points to consider.

    """
    if not points:
        return
    points[0].gpxity_speed = 0.0
    # Scan backwards to find how many trailing points still need a speed:
    # ``_`` counts how far from the end the newest already-computed point is.
    start_idx = 0
    for _, point in enumerate(reversed(points)): # noqa
        if hasattr(point, 'gpxity_speed'):
            start_idx = _
            break
    start_idx = len(points) - start_idx
    if start_idx == len(points):
        # Every point already has a speed -- nothing to do.
        return
    target_points = points[start_idx:]
    for idx, target_point in enumerate(target_points):
        # Clamp the window start so the first points use a shorter window.
        window_start = max(idx + start_idx - window, 0)
        start_point = points[window_start]
        # NOTE(review): this slice excludes the target point (ends at
        # start_idx + idx) while the time span below runs up to
        # target_point.time -- looks like an off-by-one (expected ... + 1);
        # confirm against gpxpy.geo.length semantics before changing.
        window_distance = gpx_length(points[window_start:start_idx + idx])
        delta = (target_point.time - start_point.time)
        # Convert the timedelta to seconds (including fractional part).
        window_time = delta.days * 86400.0 + delta.seconds + delta.microseconds / 1000000.0
        if window_time:
            window_speed = window_distance / window_time
        else:
            # Zero elapsed time (duplicate timestamps): avoid division by zero.
            window_speed = 0.0
        target_point.gpxity_speed = window_speed
def pairs(seq):
    """Yield every pair of adjacent elements of *seq*."""
    # pylint: disable=stop-iteration-return
    if not seq:
        return
    iterator = iter(seq)
    previous = next(iterator)
    for current in iterator:
        yield previous, current
        previous = current
def utc_to_local_delta():
    """The difference local - utc as a timedelta.

    Encapsulated so both conversion helpers use one definition of the offset.

    Returns: (timedelta) the difference.

    """
    offset_seconds = time.localtime().tm_gmtoff
    return datetime.timedelta(seconds=offset_seconds)
def local_datetime(utc):
    """Convert UTC datetime to local datetime.

    Returns: datetime, or None when *utc* is None/falsy

    """
    if not utc:
        return None
    return utc + utc_to_local_delta()
def utc_datetime(local):
    """Convert local datetime to UTC datetime.

    A naive result gets UTC tzinfo attached; an already aware result is
    returned unchanged apart from the offset subtraction.

    Returns: datetime, or None when *local* is None/falsy

    """
    if not local:
        return None
    result = local - utc_to_local_delta()
    if not result.tzinfo:
        result = result.replace(tzinfo=datetime.timezone.utc)
    return result
/CGlue-0.2.4.tar.gz/CGlue-0.2.4/cglue/data_types.py | from cglue.function import ArgumentList
from cglue.utils.dict_processor import DictProcessor
class TypeCategory:
    """Base class for the kinds of type definitions a TypeCollection knows.

    A category validates raw type-definition dicts (via DictProcessor) and
    knows how to render typedefs and literal values for its types.
    """
    def __init__(self, type_collection, name, attributes):
        # attributes may contain: 'required' (set of mandatory keys),
        # 'optional' (dict of key -> default) and 'static' (dict merged into
        # every processed type definition, see process_type).
        self._name = name
        self._attributes = attributes
        self._type_collection = type_collection
        self._data_processor = DictProcessor(
            required_keys=attributes.get('required', set()),
            optional_keys=attributes.get('optional', {}))
    def __str__(self):
        return f'TypeCategory({self._name})'
    def can_process(self, type_data):
        # True when every required key is present.
        # NOTE(review): indexes self._attributes['required'] without a default,
        # so categories built without that key must override this (BuiltinType
        # does exactly that).
        for attr in self._attributes['required']:
            if attr not in type_data:
                return False
        return True
    def process_type(self, type_data):
        # Validate/normalize the raw definition, then stamp in the category's
        # static attributes (processed data wins on key clashes).
        return {
            **self._attributes['static'],
            **self._data_processor.process(type_data)
        }
    def render_typedef(self, type_name, type_data):
        """Return the typedef source for this type; subclasses must implement."""
        raise NotImplementedError
    def render_value(self, type_name, type_data, value, context='assignment'):
        """Render *value* as source text; default is the plain str()."""
        return str(value)
    @property
    def name(self):
        return self._name
    @property
    def attributes(self):
        return self._attributes
    def attribute(self, type_name, type_data: dict, attr_name):
        """Return a specific attribute of the given type"""
        return type_data[attr_name]
    def referenced_types(self, type_name, type_data):
        # Every type at least references itself; subclasses add more.
        yield type_name
class TypeAlias(TypeCategory):
    """Category for typedef aliases: one type name standing in for another."""
    def __init__(self, type_collection):
        attributes = {
            'required': {'aliases'},
            'optional': {'default_value': None, 'pass_semantic': None},
            'static': {
                'type': TypeCollection.ALIAS
            }
        }
        super().__init__(type_collection, "type_alias", attributes)
    def render_typedef(self, type_name, type_data):
        return f"typedef {type_data['aliases']} {type_name};"
    def render_value(self, type_name, type_data, value, context='assignment'):
        # call the render of aliased type
        return self.aliased_type(type_data).render_value(value, context)
    def attribute(self, type_name, type_data: dict, attr_name):
        # Fall back to the aliased type when the alias does not set the
        # attribute itself.  NOTE(review): `or` also falls through for falsy
        # values (None/0/"") explicitly set on the alias -- confirm intended.
        return type_data.get(attr_name) or self.aliased_type(type_data).get_attribute(attr_name)
    def referenced_types(self, type_name, type_data):
        # An alias depends on its target type (plus itself via super()).
        yield type_data['aliases']
        yield from super().referenced_types(type_name, type_data)
    def aliased_type(self, type_data):
        """Resolve and return the wrapper of the aliased type."""
        return self._type_collection.get(type_data['aliases'])
class BuiltinType(TypeCategory):
    """Category for types the compiler already provides (e.g. ``void``).

    Builtins are registered explicitly by the collection, are never parsed
    from user type definitions, and emit no typedef.
    """

    def __init__(self, type_collection):
        super().__init__(type_collection, 'builtin', {})

    def can_process(self, type_data):
        # Builtin types are never created from user-supplied definitions.
        return False

    def render_typedef(self, type_name, type_data):
        # Nothing to emit: the compiler already knows these types.
        return None
class ExternalType(TypeCategory):
    """Category for types whose definition lives in an external header.

    The definition must name where it is defined and a default value; no
    typedef is generated because the external header already provides one.
    """

    def __init__(self, type_collection):
        attrs = {
            'required': {'defined_in', 'default_value'},
            'optional': {'pass_semantic': TypeCollection.PASS_BY_VALUE},
            'static': {'type': TypeCollection.EXTERNAL_DEF},
        }
        super().__init__(type_collection, 'external_type_def', attrs)

    def render_typedef(self, type_name, type_data):
        # The external header supplies the definition; emit nothing.
        return None
class FunctionPointerType(TypeCategory):
    """Category for C function-pointer typedefs."""
    def __init__(self, type_collection):
        attributes = {
            'required': {'return_type', 'arguments'},
            'optional': {'pass_semantic': TypeCollection.PASS_BY_VALUE},
            'static': {
                'type': TypeCollection.FUNC_PTR
            }
        }
        super().__init__(type_collection, 'func_ptr', attributes)
    def render_typedef(self, type_name, type_data):
        # Build "typedef <ret> (*<name>)(<args>);" from the argument dicts;
        # ArgumentList renders each argument with its direction and type.
        args = ArgumentList()
        for arg_name, arg_data in type_data['arguments'].items():
            args.add(arg_name, arg_data['direction'], self._type_collection.get(arg_data['data_type']))
        return f"typedef {type_data['return_type']} (*{type_name})({args.get_argument_list()});"
    def referenced_types(self, type_name, type_data):
        yield type_data['return_type']
        # Set comprehension de-duplicates argument types; iteration order is
        # therefore unspecified -- callers that need a stable order must sort
        # (TypeCollection.collect_type_dependencies does).
        yield from {arg['data_type'] for arg in type_data['arguments'].values()}
        yield from super().referenced_types(type_name, type_data)
class TypeWrapper:
    """Pairs a type's raw definition dict with its category and provenance.

    Dict-style access (``[]``, ``in``, ``get``) delegates to the underlying
    definition dict; rendering delegates to the category.
    """
    def __init__(self, type_name, type_data, type_category, defined_by):
        self._type_name = type_name
        self._defined_by = defined_by     # provenance string, used in warnings
        self._type_data = type_data       # the processed definition dict
        self._type_category = type_category
    @property
    def name(self):
        return self._type_name
    @property
    def category(self):
        return self._type_category
    @property
    def data(self):
        return self._type_data
    @property
    def defined_by(self):
        return self._defined_by
    def __getitem__(self, item):
        return self._type_data[item]
    def __contains__(self, item):
        return item in self._type_data
    def get(self, item, default=None):
        return self._type_data.get(item, default)
    def render_value(self, value, context='assignment'):
        # None means "use the type's default value".
        if value is None:
            value = self.default_value()
        return self.category.render_value(self.name, self._type_data, value, context)
    def get_attribute(self, attr_name):
        return self.category.attribute(self.name, self._type_data, attr_name)
    def default_value(self):
        return self.get_attribute('default_value')
    def passed_by(self):
        return self.get_attribute('pass_semantic')
    def render_typedef(self):
        return self.category.render_typedef(self.name, self._type_data)
    def __eq__(self, o: object) -> bool:
        # Wrappers compare by their underlying definition dict; a raw dict on
        # the right-hand side compares directly.
        if type(o) is TypeWrapper:
            # noinspection PyUnresolvedReferences,PyProtectedMember
            o = o._type_data
        return o == self._type_data
    def __hash__(self) -> int:
        # NOTE(review): identity hash while __eq__ compares data breaks the
        # eq/hash invariant (equal wrappers may hash differently).  Existing
        # code appears to rely on identity-keyed use -- confirm before changing.
        return id(self)
    def __str__(self):
        return f'TypeWrapper({self._type_name}, {self._type_category})'
class TypeCollection:
    """Registry of all known types, keyed by name, plus their categories."""
    # Category identifiers (values of a definition's 'type' key).
    BUILTIN = 'builtin'
    ALIAS = 'type_alias'
    EXTERNAL_DEF = 'external_type_def'
    FUNC_PTR = 'func_ptr'
    # Pass-semantic identifiers.
    PASS_BY_VALUE = 'value'
    PASS_BY_POINTER = 'pointer'
    def __init__(self):
        self._type_data = {}
        self._type_categories = {}
        # Register the standard categories and the builtin C types.
        self.add_category(TypeAlias(self))
        self.add_category(BuiltinType(self))
        self.add_category(ExternalType(self))
        self.add_category(FunctionPointerType(self))
        default_types = {
            'void': {
                'type': TypeCollection.BUILTIN,
                'pass_semantic': TypeCollection.PASS_BY_VALUE,
                'default_value': None
            },
            'void*': {
                'type': TypeCollection.BUILTIN,
                'pass_semantic': TypeCollection.PASS_BY_VALUE,
                'default_value': 'NULL'
            }
        }
        for name, data in default_types.items():
            self.add(name, data, 'builtin type')
    def add_category(self, info: TypeCategory):
        self._type_categories[info.name] = info
    def category(self, type_category):
        """Return the TypeCategory registered under *type_category*."""
        return self._type_categories[type_category]
    def add(self, type_name, info, defined_by):
        """Register *info* under *type_name*; *defined_by* records provenance.

        A duplicate always prints a warning; it only raises when the new
        definition conflicts with the stored one.
        """
        try:
            # if the type is already known, check if the definitions are compatible
            existing_type = self.get(type_name)
            print(f'Warning: Duplicate type {type_name} defined in {defined_by}, '
                  f'already added from {existing_type.defined_by}')
            if info != existing_type:
                raise Exception(f'Conflicting definitions exist for {type_name}')
        except KeyError:
            # type is not yet known, add it
            self._type_data[type_name] = TypeWrapper(type_name, info, self._type_categories[info['type']], defined_by)
    def get(self, type_name):
        # A non-str argument is treated as an iterable of names and answered
        # with a lazy generator of wrappers.
        if type(type_name) is not str:
            return (self._type_data[t] for t in type_name)
        return self._type_data[type_name]
    def export(self):
        """Return the non-builtin types as plain dicts (for serialization)."""
        def strip(data):
            # Reaches into the wrapper's private dict on purpose: the export
            # must be the raw definition, not wrapper metadata.
            data = data._type_data.copy()
            # These categories are recognizable without a 'type' key.
            if data['type'] in (TypeCollection.ALIAS, TypeCollection.EXTERNAL_DEF):
                del data['type']
            return data
        return {name: strip(data) for name, data in self._type_data.items() if data['type'] != TypeCollection.BUILTIN}
    def collect_type_dependencies(self, type_data: TypeWrapper):
        """Yield the wrappers *type_data* depends on, depth-first, then itself.

        Recursion stops at a type's self-reference (referenced_types always
        yields the type's own name).  NOTE(review): mutually-referencing types
        would recurse forever -- confirm cycles cannot occur upstream.
        """
        for referenced_type_name in sorted(type_data.category.referenced_types(type_data.name, type_data)):
            referenced_type = self.get(referenced_type_name)
            if referenced_type != type_data:
                yield from self.collect_type_dependencies(referenced_type)
            else:
                yield referenced_type
    def normalize_type_name(self, type_name):
        """Strip const/pointer/space decoration from names not in the registry."""
        if type(type_name) is not str:
            return (self.normalize_type_name(t) for t in type_name)
        try:
            self.get(type_name)
        except KeyError:
            # Unknown as-is: try the undecorated base name instead.
            type_name = type_name.replace('const ', '').replace('*', '').replace(' ', '')
        return type_name
    def process_type_definition(self, type_name, type_def):
        """Validate *type_def*, inferring its category when 'type' is absent."""
        if 'type' in type_def:
            # Explicit category: look it up and strip the key for processing.
            type_data = type_def.copy()
            type_category = type_data['type']
            try:
                category = self._type_categories[type_category]
                del type_data['type']
            except KeyError as e:
                raise Exception(f'Unknown type category {type_category} set for {type_name}') from e
        else:
            # Implicit category: first one whose required keys are all present
            # (for/else raises when none matches).
            type_data = type_def
            for type_category, category in self._type_categories.items():
                if category.can_process(type_data):
                    break
            else:
                raise Exception(f'Invalid type definition for {type_name}, maybe missing type specifier?')
        try:
            return category.process_type(type_data)
        except Exception as e:
            raise Exception(f'Type {type_name} ({type_category}) definition is not valid: {e}') from e
/GTW-1.2.6.tar.gz/GTW-1.2.6/_OMP/_PAP/_E164/Country.py |
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from _GTW import GTW
from _TFL import TFL
from _TFL.formatted_repr import formatted_repr
from _TFL.I18N import _, _T, _Tn
from _TFL.portable_repr import portable_repr
from _TFL.pyk import pyk
from _TFL.Regexp import \
(Regexp, Multi_Regexp, Re_Replacer, Multi_Re_Replacer, re)
from _TFL.Trie import Word_Trie as Trie
from _TFL._Meta.Once_Property import Once_Property
from _GTW._OMP._PAP._E164 import E164
import _GTW._OMP._PAP._E164.Error
import _TFL._Meta.Object
### Information about national numbering plans can be found at::
### http://www.itu.int/oth/T0202.aspx?parent=T0202
class M_Country (TFL.Meta.Object.__class__) :
    """Meta class for `Country`.
    Instantiating `Country (cc)` goes through `__call__` below, which caches
    one instance per country code and picks the most specific implementation
    class available for that code.
    """
    def __call__ (cls, country_code) :
        ### Accept "+43", " 43", 43 ... -- normalize to the bare digit string.
        cc = str (country_code).lstrip ("+")
        try :
            name = cls.cc_map [cc]
        except KeyError :
            raise ValueError ("Unknown country code %s" % (cc, ))
        try :
            result = cls.Table [cc]
        except KeyError :
            args = ()
            m_name = "_Country__%s" % cc
            ### Prefer a dedicated per-country module; fall back to a generic
            ### regexp-based class, then to the info-less default.
            try :
                module = E164._Import_Module (m_name)
                C = module.Country
            except ImportError :
                try :
                    regexp = cls.ndc_sn_matcher_map [cc]
                    args = (regexp, )
                    C = Country_R
                except KeyError :
                    C = Country_0
            result = cls.Table [cc] = C.__m_super.__call__ (name, cc, * args)
        return result
    # end def __call__
    @Once_Property
    def cc_data (cls) :
        """Module holding the country-code tables (imported lazily)."""
        from _GTW._OMP._PAP._E164 import cc_data as result
        return result
    # end def cc_data
    @Once_Property
    def cc_map (cls) :
        "Dictionary mapping country codes to country names."
        return cls.cc_data.cc_map
    # end def cc_map
    @Once_Property
    def cc_matcher_fragment (cls) :
        """Regular expression fragment matching valid country code."""
        ### Sort longest-first so the regex alternation prefers the longest
        ### matching code (e.g. "420" before "42").
        sk = lambda cc : (- len (cc), cc)
        ccs = sorted (cls.cc_map, key = sk)
        result = "".join (("(?P<cc>", "|".join (ccs), ")"))
        return result
    # end def cc_matcher_fragment
    @Once_Property
    def cc_regexp (cls) :
        ### Lenient: the "+" / "00" international prefix is optional.
        return Regexp (r"^ *(?:(?:\+ *|00)?" + cls.cc_matcher_fragment + r")")
    # end def cc_regexp
    @Once_Property
    def cc_regexp_strict (cls) :
        ### Strict: the "+" or "00" international prefix is required.
        return Regexp (r"^ *(?:(?:\+ *|00)" + cls.cc_matcher_fragment + r")")
    # end def cc_regexp_strict
    @Once_Property
    def cc_trie (cls) :
        """Word trie of country codes."""
        return Trie (cls.cc_map)
    # end def cc_trie
    @Once_Property
    def ndc_data (cls) :
        """Module holding the national-destination-code tables."""
        from _GTW._OMP._PAP._E164 import ndc_data as result
        return result
    # end def ndc_data
    @Once_Property
    def ndc_sn_matcher_map (cls) :
        """Map country code -> regexp matching that country's ndc/sn split."""
        return cls.ndc_data.ndc_sn_matcher_map
    # end def ndc_sn_matcher_map
    def completions (cls, cc_prefix) :
        """Return all country codes starting with `cc_prefix` and unique
        completion for `cc_prefix`, if any.
        """
        return cls.cc_trie.completions (cc_prefix)
    # end def completions
    def match (cls, phone_number) :
        """Match `phone_number` leniently (international prefix optional)."""
        return cls._match (phone_number, cls.cc_regexp)
    # end def match
    def match_strict (cls, phone_number) :
        """Match `phone_number` strictly (international prefix required)."""
        return cls._match (phone_number, cls.cc_regexp_strict)
    # end def match_strict
    def _match (cls, phone_number, regexp) :
        ### Returns a `Match` instance, or None if `regexp` does not match.
        match = regexp.match (phone_number)
        if match :
            return Match (cls (regexp.cc), match, phone_number)
    # end def _match
# end class M_Country
@pyk.adapt__bool__
@pyk.adapt__str__
class Country (TFL.Meta.BaM (TFL.Meta.Object, metaclass = M_Country)) :
    """Provide phone number mapping for a specific country.
    Instances are created and cached per country code by `M_Country.__call__`;
    subclasses refine splitting and ndc lookup per national numbering plan.
    """
    ### Cache of Country instances, keyed by country code (filled by the
    ### metaclass).
    Table = {}
    ### 3-Aug-2015 16:48
    ### https://en.wikipedia.org/wiki/Local_conventions_for_writing_telephone_numbers#Denmark
    ### A : ndc digit
    ### B : sn digit
    format_map = \
        { "31" : ["AA-BBBBBBB"] # Netherlands
        , "32" : ["A BBB BB BB", "AA BB BB BB", "AAA BB BB BB"] # Belgium
        , "33" : ["A BB BB BB BB"] # France
        , "34" : ["AAA BBB BBB", "AA B BBB BBB", "A BB BBB BBB"] # Spain
        , "351" : ["AA BB BB BBB", "AAA BBB BBB"] # Portugal
        , "353" : ["AA BBB BBBB"] # Ireland
        , "358" : ["AA BBB BB BB", "A BBB BBB"] # Finland
        , "36" : ["A BBB BBB", "A BBB BBBB"] # Hungary
        , "39" : ["AAA BBBBBBB"] # Italy
        , "41" : ["AA BBB BB BB"] # Switzerland
        , "420" : ["A BB BBB BBB", "AA B BBB BBB", "AAA BBB BBB"] # Czech Republic
        , "44" : ["AA BBBB BBBB", "AAAA BBB BBBB", "AAAAA BBBBBB"] # UK
        , "45" : ["BB BB BB BB"] # Denmark
        , "47" : ["A B BB BB BB", "AA BB BB BB", "AAA BB BBB"] # Norway
        , "48" : ["AA BBB BB BB", "AAA BBB BBB"] # Poland
        , "49" : ["AAAA BBBBBB"] # Germany
        }
    ### Re_Replacer chains that insert conventional grouping spaces into a
    ### bare subscriber number.
    formatted_sn = Multi_Re_Replacer \
        ( Re_Replacer (r"^(\d{2,3})(\d{2,3})(\d{2,4})$", r"\1 \2 \3")
        , Re_Replacer (r"^(\d{2})(\d{2,3})$", r"\1 \2")
        , Re_Replacer (r"^(\d{4})(\d{3,5})(\d{4})$", r"\1 \2 \3")
        )
    formatted_sn_4x2 = Re_Replacer \
        ( r"^(\d{2})(\d{2})(\d{2})(\d{2})$", r"\1 \2 \3 \4")
    ndc_info_map = None ### Only available for `Country_M`
    ndc_max_length = 4
    ndc_min_length = 1
    ndc_prefix = "0"
    ### Strips everything but digits from ndc/sn input.
    _number_cleaner = Re_Replacer (r"[^0-9]", "")
    def __init__ (self, name, code) :
        self.name = name
        self.code = code
    # end def __init__
    @Once_Property
    def ndc_sn_max_length (self) :
        ### E.164 limits the full number to 15 digits including country code.
        code = self.code
        default = 15 - len (code)
        return self.__class__.ndc_data.ndc_sn_max_length.get (code, default)
    # end def ndc_sn_max_length
    @Once_Property
    def ndc_sn_min_length (self) :
        code = self.code
        return self.__class__.ndc_data.ndc_sn_min_length.get (code, 5)
    # end def ndc_sn_min_length
    def cleaned_ndc (self, ndc) :
        ### Returns the ndc reduced to digits, or None for empty input.
        if ndc :
            return self._number_cleaner (ndc)
    # end def cleaned_ndc
    def cleaned_sn (self, ndc, sn) :
        ### Returns the subscriber number reduced to digits; raises
        ### SN_Too_Short/SN_Too_Long when its length violates the plan's
        ### limits for `ndc`.  Returns None for empty `sn`.
        if sn :
            result = self._number_cleaner (sn)
            l_sn = len (result)
            max_sn = self.sn_max_length (ndc)
            min_sn = self.sn_min_length (ndc)
            if min_sn > l_sn :
                raise E164.SN_Too_Short \
                    (self, "-".join ((ndc, sn)), l_sn, min_sn)
            elif l_sn > max_sn :
                raise E164.SN_Too_Long \
                    (self, "-".join ((ndc, sn)), l_sn, max_sn)
            else :
                return result
    # end def cleaned_sn
    def ndc_info (self, ndc) :
        ### No ndc information available at this level (see `Country_M`).
        pass
    # end def ndc_info
    def sn_max_length (self, ndc) :
        return self.ndc_sn_max_length - len (ndc)
    # end def sn_max_length
    def sn_min_length (self, ndc) :
        return self.ndc_sn_min_length - len (ndc)
    # end def sn_min_length
    def split (self, ndc_sn) :
        ### Split `ndc_sn` into (ndc, sn); raises E164.ValueError when the
        ### country's regexp does not match.
        regexp = self.regexp
        match = regexp.match (ndc_sn)
        if match :
            try :
                r_ndc = regexp.ndc
            except AttributeError :
                ### regexp without an `ndc` group: whole input is the sn.
                ndc = ""
            else :
                ndc = self.cleaned_ndc (r_ndc)
            sn = self.cleaned_sn (ndc, regexp.sn)
            return ndc, sn
        raise E164.ValueError (self, ndc_sn, self._split_error_tail (""))
    # end def split
    def _split_error_tail (self, tail) :
        ### Hook for subclasses to append explanations to split errors.
        return tail
    # end def _split_error_tail
    def __bool__ (self) :
        return True
    # end def __bool__
    def __repr__ (self) :
        return "%s (%s)" % (_T ("Country"), self.code)
    # end def __repr__
    def __str__ (self) :
        return "%r [%s]" % (self, self.name)
    # end def __str__
# end class Country
@pyk.adapt__bool__
class Country_0 (Country) :
    """Country without further information about ndc and sn.
    Falls back to generic patterns: the ndc is 1-4 digits, either separated
    from the sn by space/dash/slash or enclosed in parentheses.  Instances
    are falsy (see `__bool__`) to signal the missing numbering-plan data.
    """
    regexp = Multi_Regexp \
        ( Regexp
            ( r" *0? *"
              r"[-/]?"
              r"(?P<ndc>\d{1,4})"
              r"(?:[- /](?P<sn>[- 0-9]+)?)?$"
            )
        , Regexp
            ( r" *0? *"
              r"\("
              r"(?P<ndc>\d{1,4})"
              r"\) *"
              r"(?P<sn>[- 0-9]+)?$"
            )
        )
    def _split_error_tail (self, tail) :
        ### Without a known numbering plan the ndc length is ambiguous, so
        ### the caller must separate ndc and sn explicitly -- explain that.
        return \
            ( "".join
                ( ( self.__super._split_error_tail (tail)
                  , "\n "
                  , _T
                      ( "Network destination code "
                        "and subscriber number need to be separated "
                        "by a space or dash."
                      )
                  )
                )
            )
    # end def _split_error_tail
    def __bool__ (self) :
        return False
    # end def __bool__
# end class Country_0
class Country_M (Country) :
    """Country with a separate module with detailed ndc information.
    Subclasses (one per country, in `_Country__<cc>` modules) fill in the
    `*_map` class attributes below; the matching regexp is then derived from
    the known national destination codes.
    """
    ndc_info_map = None ### Need to define for country-specific sub-class
    ndc_type_map = None ### May be defined for country-specific sub-class
    ndc_usage_map = None ### May be defined for country-specific sub-class
    sn_max_length_map = None ### May be defined for country-specific sub-class
    sn_min_length_map = None ### May be defined for country-specific sub-class
    @Once_Property
    def ndc_matcher_fragment (self) :
        """Regular expression fragment matching national destination code."""
        ### Longest-first so the alternation prefers the longest matching ndc.
        sk = lambda ndc : (- len (ndc), ndc)
        ccs = sorted (self.ndc_info_map, key = sk)
        result = "".join (("(?P<ndc>", "|".join (ccs), ")"))
        return result
    # end def ndc_matcher_fragment
    @Once_Property
    def ndc_prefix_matcher_fragment (self) :
        ### Optional national dialing prefix (usually "0") before the ndc.
        prefix = self.ndc_prefix
        return r" *(?:%s)? *" % (prefix, ) if prefix else ""
    # end def ndc_prefix_matcher_fragment
    @Once_Property
    def regexp (self) :
        ### Two accepted spellings: "<prefix><ndc><sep><sn>" and
        ### "<prefix>(<ndc>) <sn>".
        return Multi_Regexp \
            ( Regexp \
                ( self.ndc_prefix_matcher_fragment
                + r"[-/ ]?"
                + self.ndc_matcher_fragment
                + r"[-/ ]?"
                + r"(?P<sn>[- 0-9]+)?$"
                )
            , Regexp \
                ( self.ndc_prefix_matcher_fragment
                + r"\("
                + self.ndc_matcher_fragment
                + r"\) *"
                + r"(?P<sn>[- 0-9]+)?$"
                )
            ,
            )
    # end def regexp
    def ndc_info (self, ndc) :
        ### Returns a descriptive string for `ndc`, or None when unknown.
        try :
            return self.ndc_info_map [ndc]
        except (AttributeError, KeyError) :
            pass
    # end def ndc_info
    def sn_max_length (self, ndc) :
        ### Per-ndc override, falling back to the country-wide default.
        try :
            result = self.sn_max_length_map [ndc]
        except (AttributeError, LookupError) :
            result = None
        if result is None :
            result = self.__super.sn_max_length (ndc)
        return result
    # end def sn_max_length
    def sn_min_length (self, ndc) :
        ### Per-ndc override, falling back to the country-wide default.
        try :
            result = self.sn_min_length_map [ndc]
        except (AttributeError, LookupError) :
            result = None
        if result is None :
            result = self.__super.sn_min_length (ndc)
        return result
    # end def sn_min_length
# end class Country_M
class Country_R (Country) :
    """Country whose ndc/sn matcher is an externally supplied regexp.
    Chosen by `M_Country.__call__` for country codes that have an entry in
    `ndc_sn_matcher_map` but no dedicated per-country module.
    """
    regexp = None
    def __init__ (self, name, code, regexp = None) :
        self.__super.__init__ (name, code)
        if regexp is None :
            return
        self.regexp = regexp
    # end def __init__
# end class Country_R
class Match (TFL.Meta.Object) :
    """Match of `Country` for `phone_number`.
    Splitting into ndc/sn is deferred: it only happens (and its errors only
    surface) on first access to `ndc` or `sn`.
    """
    ### Lazily computed by `_set_ndc_sn`.
    _ndc = None
    _sn = None
    def __init__ (self, country, match, phone_number) :
        self.country = country
        self.cc = country.code
        self.match = match
        self.phone_number = phone_number
    # end def __init__
    @Once_Property
    def attr_dict (self) :
        ### Dictionary of the match's components (forces the ndc/sn split).
        return dict \
            ( cc = self.cc
            , ndc = self.ndc
            , sn = self.sn
            )
    # end def attr_dict
    @property
    def ndc (self) :
        ### National destination code; computed on first access.
        result = self._ndc
        if result is None :
            self._set_ndc_sn ()
            result = self._ndc
        return result
    # end def ndc
    @Once_Property
    def ndc_info (self) :
        ### Human-readable information about the ndc, if the country has any.
        return self.country.ndc_info (self.ndc)
    # end def ndc_info
    @Once_Property
    def ndc_sn (self) :
        ### Everything after the matched country code, stripped.
        return self.phone_number [self.match.end ():].strip ()
    # end def ndc_sn
    @property
    def sn (self) :
        ### Subscriber number; computed on first access.
        result = self._sn
        if result is None :
            self._set_ndc_sn ()
            result = self._sn
        return result
    # end def sn
    def _set_ndc_sn (self) :
        ### Delegate the split to the country; may raise E164 errors.
        self._ndc, self._sn = self.country.split (self.ndc_sn)
    # end def _set_ndc_sn
    def __repr__ (self) :
        return "Match for %r: %s" % (self.country, self.ndc_sn)
    # end def __repr__
    def __str__ (self) :
        return "Match for %s: %s" % (self.country, self.ndc_sn)
    # end def __str__
# end class Match
### Doctests exercising Country / Match (run via the `__test__` dict below).
_test_country_match = r"""
>>> AT = Country (43)
>>> AT
Country (43)
>>> m = Country.match ("43 664 123 45 67")
>>> m.country
Country (43)
>>> print (portable_repr (m.ndc_sn))
'664 123 45 67'
>>> print (m.ndc_info)
Mobile (A1)
>>> m.country is AT
True
>>> m.country is Country (43)
True
>>> print (Country.match_strict ("43 664 123 45 67"))
None
>>> print (Country.match_strict ("+43 664 123 45 67"))
Match for Country (43) [Austria]: 664 123 45 67
>>> print (Country.match_strict ("0043 664 123 45 67"))
Match for Country (43) [Austria]: 664 123 45 67
>>> print (portable_repr ((m.ndc, m.sn)))
('664', '1234567')
>>> m = Country.match ("436641234567")
>>> print (portable_repr ((m.ndc, m.sn)))
('664', '1234567')
>>> m = Country.match ("439101234567")
>>> with expect_except (ValueError) :
... m.country.split (m.ndc_sn)
ValueError: Not a proper phone number for Country (43) [Austria]: 9101234567
>>> m = Country.match ("+41 43 123 45 67")
>>> print (m.country)
Country (41) [Switzerland (Confederation of)]
>>> print (m.ndc_info)
Zurich
>>> print (portable_repr ((m.ndc, m.sn)))
('43', '1234567')
>>> m = Country.match ("3861 123 45 67")
>>> print (m.country)
Country (386) [Slovenia (Republic of)]
>>> print (m.ndc_info)
Ljubljana
>>> print (portable_repr ((m.ndc, m.sn)))
('1', '1234567')
>>> m = Country.match ("38651 123 456")
>>> print (portable_repr ((m.ndc, m.sn)))
('51', '123456')
>>> print (m.ndc_info)
Telekom Slovenije
>>> m = Country.match ("38671 123 45 67")
>>> with expect_except (ValueError) :
... m.ndc
SN_Too_Long: Not a proper phone number for Country (386) [Slovenia (Republic of)]: 71-123 45 67; subscriber number must have at most 6 digits; got 7 digits instead
>>> m = Country.match ("+49 89 123 45 67")
>>> print (m.country)
Country (49) [Germany (Federal Republic of)]
>>> print (m.ndc_info)
None
>>> print (portable_repr ((m.ndc, m.sn)))
('89', '1234567')
>>> m = Country.match ("+49891234567")
>>> print (m.country)
Country (49) [Germany (Federal Republic of)]
>>> with expect_except (ValueError) :
... print (portable_repr ((m.ndc, m.sn)))
ValueError: Not a proper phone number for Country (49) [Germany (Federal Republic of)]: 891234567
Network destination code and subscriber number need to be separated by a space or dash.
>>> m = Country.match ("+39 045 1234567")
>>> print (m.ndc_info)
Province of Verona
>>> print (portable_repr ((m.ndc, m.sn)))
('045', '1234567')
>>> m = Country.match ("+39 045 123456789")
>>> with expect_except (ValueError) :
... print (portable_repr ((m.ndc, m.sn)))
SN_Too_Long: Not a proper phone number for Country (39) [Italy, Vatican]: 045-123456789; subscriber number must have at most 8 digits; got 9 digits instead
>>> m = Country.match ("+39 045 12345")
>>> with expect_except (ValueError) :
... print (portable_repr ((m.ndc, m.sn)))
SN_Too_Short: Not a proper phone number for Country (39) [Italy, Vatican]: 045-12345; subscriber number must have at least 6 digits; got 5 digits instead
"""
### Registers the doctests with the doctest runner.
__test__ = dict \
    ( test_country_match = _test_country_match
    )
### Export the public names when imported as part of the package.
if __name__ != "__main__" :
    GTW.OMP.PAP.E164._Export ("*")
### __END__ GTW.OMP.PAP.E164.Country
/MoonNectar-0.6.0.tar.gz/MoonNectar-0.6.0/mnectar/action.py | from __future__ import annotations
import functools
import inspect
from copy import copy
from dataclasses import dataclass, field
from typing import Tuple, Callable, Optional
from mnectar.util.signal import BoundSignal
from mnectar.config import Setting
from mnectar.util import classproperty
class Actionable:
    """Mixin for objects that carry :class:`Action` descriptors.

    Each instance registers itself on ``app._actionable_instances`` so the
    application can enumerate every object exposing actions.
    """

    app = None

    @property
    def actionables(self):
        """Mapping of attribute name to the bound Action for this instance."""
        # Scan the class, not the instance: reading the descriptors off the
        # instance while enumerating them would recurse endlessly.
        is_action = lambda member: isinstance(member, Action)
        bound = {}
        for attr_name, _descriptor in inspect.getmembers(type(self), is_action):
            bound[attr_name] = getattr(self, attr_name)
        return bound

    def __init__(self, *arg, app=None, **kw):
        self.app = self.app or app
        super().__init__(*arg, **kw)

        if self.app:
            if not hasattr(self.app, '_actionable_instances'):
                self.app._actionable_instances = []
            self.app._actionable_instances.append(self)
@dataclass
class Action:
    """Descriptor describing a user-triggerable application action.

    An ``Action`` is declared as a class attribute on an :class:`Actionable`
    subclass.  Attribute access on an instance returns a per-instance bound
    copy (see :meth:`__get__` / :meth:`bind`).  Triggering the action emits
    the configured ``signal`` or writes the configured ``setting``, and then
    invokes the optional callback registered via :meth:`triggered`.

    Exactly one of ``setting`` / ``signal`` may be given (enforced in
    :meth:`__post_init__`).
    """

    # fmt: off
    menu: str                                     # menu path the action appears under
    group: str = ""                               # logical grouping within the menu
    name: str = ""                                # human-readable action name
    shortcut_default: str = ""                    # default keyboard shortcut
    checkable: bool = False                       # whether the action is a toggle
    exclusive: bool = False                       # whether the toggle is exclusive in its group
    setting: Optional[Setting] = None             # setting written when triggered
    signal: Optional[BoundSignal] = None          # signal emitted when triggered
    args: Tuple = field(default_factory = tuple)  # fixed arguments passed on trigger

    _instance: Actionable = field(default = None, init = False)    # owner instance once bound
    _name: str = field(default = "", init = False)                 # attribute name on the owner class
    _triggered_cb: Callable = field(default = None, init = False)  # user callback (see triggered())
    _shortcut_cb: Callable = field(default = None, init = False)   # UI callback fired on shortcut change
    # fmt: on

    # Persistent mapping of (menu, group, name) -> user-overridden shortcut.
    shortcut_overrides = Setting()

    def __post_init__(self):
        self._instance = None
        if self.setting and self.signal:
            raise ValueError("An action cannot specify both a setting and a signal!")

    def __set_name__(self, owner, name):
        # Remember the attribute name so bound copies can be cached in the
        # instance __dict__ (see __get__).
        self._name = name

    @property
    def _shortcut_key(self):
        """Key identifying this action inside the shortcut-override mapping."""
        return (self.menu, self.group, self.name)

    @property
    def shortcut(self):
        """The effective shortcut: the user override if any, else the default."""
        if self.shortcut_overrides is None:
            self.shortcut_overrides = {}
        if self._shortcut_key in self.shortcut_overrides:
            return self.shortcut_overrides[self._shortcut_key]
        else:
            return self.shortcut_default

    @shortcut.setter
    def shortcut(self, value):
        if self.shortcut_overrides is None:
            self.shortcut_overrides = {}
        if value == self.shortcut_default:
            # Re-assigning the default clears any stored override instead.
            del self.shortcut
        else:
            self.shortcut_overrides[self._shortcut_key] = value

            if callable(self._shortcut_cb):
                self._shortcut_cb(value)

    @shortcut.deleter
    def shortcut(self):
        # NOTE(review): assumes shortcut_overrides was already initialised
        # (reading the `shortcut` property first guarantees this) — confirm
        # no caller deletes before any read/write.
        if self._shortcut_key in self.shortcut_overrides:
            del self.shortcut_overrides[self._shortcut_key]

            if callable(self._shortcut_cb):
                self._shortcut_cb(self.shortcut_default)

    def set_shortcut_change_callback(self, cb = None):
        """Register *cb* to be called with the new shortcut whenever it changes."""
        self._shortcut_cb = cb

    def bind(self, instance, owner):
        """Return a copy of this action bound to *instance* (its signal bound too)."""
        bound = copy(self)
        bound._instance = instance
        # NOTE(review): `app` is not a declared dataclass field; it is attached
        # dynamically to the bound copy here.
        bound.app = getattr(instance, 'app', None)
        if bound.signal:
            bound.signal = bound.signal.bind(instance, owner)
        if bound.setting and type(bound.is_checked()) == bool:
            # Boolean-valued settings are presented as checkable toggles.
            bound.checkable = True
        if self._triggered_cb:
            bound._triggered_cb = functools.partial(self._triggered_cb, instance)
        return bound

    def get_setting(self):
        """
        Return the value of the setting object.
        """
        return self.setting.__get__(self._instance, type(self._instance))

    def set_setting(self, value):
        """
        Set the value of the setting.
        """
        self.setting.__set__(self._instance, value)

    def __get__(self, instance, owner):
        # Descriptor protocol: cache exactly one bound copy per owning instance.
        if instance is None:
            return self
        elif self._name in instance.__dict__:
            return instance.__dict__[self._name]
        else:
            bound = self.bind(instance, owner)
            instance.__dict__[self._name] = bound
            return bound

    def __set__(self, instance, value):
        raise ValueError(f"Read-Only Descriptor: {self._name}")

    def is_checked(self):
        # With fixed args the action is "checked" when the setting equals them;
        # otherwise the (boolean) setting value itself is the checked state.
        # Returns None when no setting is configured.
        if self.setting and self.args:
            return (self.setting.__get__(self._instance, type(self._instance)),) == self.args
        elif self.setting and not self.args:
            return self.setting.__get__(self._instance, type(self._instance))

    def triggered(self, function):
        """
        Decorator used to set a callback when the action is triggered
        """
        self._triggered_cb = function
        return function

    def on_triggered(self, checked=False):
        """Dispatch the action: emit the signal / write the setting, then call the callback."""
        if self.signal:
            if self.args:
                self.signal.emit(*self.args)
            elif self.checkable:
                self.signal.emit(checked)
            else:
                self.signal.emit()
        elif self.setting:
            if self.args:
                self.set_setting(*self.args)
            else:
                self.set_setting(checked)
        if self._triggered_cb:
            if self.args:
                self._triggered_cb(*self.args)
            elif self.checkable:
                self._triggered_cb(checked)
            else:
                self._triggered_cb()
/JATA_Tools-0.1.9-py3-none-any.whl/CJH/CJH.py | import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
def find_between(s, first, last):
    """Return the substring of *s* strictly between *first* and *last*.

    Contract preserved from the original implementation:
      - both markers found  -> text between the first occurrence of *first*
        and the next occurrence of *last*;
      - *last* missing      -> everything after *first* to the end of *s*;
      - *first* missing     -> log the problem and return the sentinel 'NA'.

    Fixes: the original computed ``s.index(first)`` twice and wrapped the
    whole body in ``except BaseException`` (which also swallows
    KeyboardInterrupt/SystemExit); the lookup is now done once and only the
    expected ValueError is handled.
    """
    try:
        start = s.index(first) + len(first)
    except ValueError as e:
        # *first* is absent: keep the best-effort logging contract.
        print(e, first, last)
        return 'NA'
    try:
        end = s.index(last, start)
    except ValueError:
        # *last* is absent after *first*: return everything to the end.
        return s[start:]
    return s[start:end]
class CJH_Archives:
    """Scraper for the Center for Jewish History archives catalogue.

    Pages through the public search listing at archives.cjh.org, follows each
    entry's detail page, and assembles the parsed fields into a pandas
    DataFrame.  Performs live HTTP requests via ``requests`` + BeautifulSoup.
    """

    def __init__(self, repo, url=False):
        # repo: repository selector ('AJHS' or 'CUSTOM').
        # url:  base search URL (without the trailing page number); only
        #       meaningful for the 'CUSTOM' repository.
        self.repo = repo
        self.url = url

    def get_meta_data(self, object_type, page_to_start_at, maximum_pages_to_scrape):
        """Scrape entry metadata and return it as a pandas DataFrame.

        object_type: 'RECORDS', 'COLLECTIONS', 'DIGITAL' or 'CUSTOM'
        page_to_start_at: 1-based index of the first listing page
        maximum_pages_to_scrape: number of listing pages to walk
        """
        def scrape_all_records(object_type='records', start_page=1, stop_after_pages=0, url_override=False):
            """
            URL OVERRIDE MUST BE WITHOUT THE START PAGE
            """
            # Clamp the starting page to 1; the listing is 1-indexed.
            if start_page <= 0:
                print("Must start at minimum of page 1")
                start_page = 1
                page = start_page
            else:
                page = start_page
            # Pick the listing URL template for the requested object type.
            # NOTE(review): an unrecognised object_type leaves headless_url /
            # base_URL undefined and fails later with NameError — confirm
            # whether a fallback is wanted.
            if object_type.upper() == 'RECORDS':
                print("Scraping All Individual Records")
                headless_url = "https://archives.cjh.org/repositories/3/objects?q[]=%2A&op[]=OR&field[]=keyword&from_year[]=&to_year[]=&limit=digital_object,archival_object&sort=title_sort%20asc&page="
                base_URL = str(headless_url + str(page))
            elif object_type.upper() == 'COLLECTIONS':
                # page = start_page
                print("Scraping Collections (Finding Aids)")
                headless_url = "https://archives.cjh.org/repositories/3/resources?q[]=%2A&op[]=&field[]=title&from_year[]=&to_year[]=&limit=resource&sort=year_sort%20asc&page="
                base_URL = str(headless_url + str(page))
            elif object_type.upper() == 'DIGITAL':
                # page = start_page
                print("Scraping Digital Records")
                headless_url = "https://archives.cjh.org/repositories/3/objects?q[]=%2A&op[]=OR&field[]=keyword&from_year[]=&to_year[]=&limit=digital_object&sort=year_sort%20asc&page="
                base_URL = str(headless_url + str(page))
            elif object_type.upper() == 'CUSTOM':
                headless_url = url_override
                base_URL = str(headless_url + str(page))

            def scrape_record(name, link, web_page, object_type):
                """Parse one detail page into a tuple of (field, value) pairs."""
                # print(web_page, link)
                # (.+?)
                # meta_dict = find_between(str(i),'<script type="application/ld+json">',' </script>' )
                # meta_dict = re.findall(r'>(', str(web_page))
                title = (web_page.title)
                # Breadcrumb anchors give the record's position in the archive tree.
                part_of = web_page.find_all('ul', {'class': 'breadcrumb'})
                part_of = part_of[0].find_all('a')
                location_tupes = []
                for i in part_of:
                    link = (str(i).split('"')[1])
                    found_loc_name = (str(i).split('>')[1]).split('<')[0]
                    tupp = (found_loc_name, link)
                    location_tupes.append(tupp)
                locs = (location_tupes)
                # Two text blobs hold the metadata: the upper details panel and
                # the accordion ("acc_holder") panel.
                subnotes = web_page.find_all('div', {'class': 'upper-record-details'})[0].text
                div_data_1 = [("Name", name), ("Link", link)]
                acord = web_page.find_all('div', {'class': 'acc_holder clear'})[0].text
                acc_data = []
                # Field labels expected in each blob, per object type.
                if object_type.upper() == 'RECORDS':
                    possible_fields_1 = [
                        "Scope and Contents",
                        "Dates",
                        "Language of Materials",
                        "Access Restrictions",
                        "Extent",
                    ]
                    possible_fields_2 = [
                        "Related Names",
                        "Digital Material",
                        "Physical Storage Information",
                        "Repository Details",
                    ]
                elif object_type.upper() == 'COLLECTIONS':
                    possible_fields_1 = [
                        "Scope and Content Note",
                        "Dates",
                        "Creator",
                        "Access Restrictions",
                        "Use Restrictions",
                        "Conditions Governing Access",
                        "Conditions Governing Use",
                        "Extent",
                        "Language of Materials"
                    ]
                    possible_fields_2 = [
                        "Additional Description",
                        "Subjects",
                        "Related Names",
                        "Finding Aid & Administrative Information",
                        'Physical Storage Information',
                        'Repository Details',
                    ]
                ##subnotes
                # b1: presence flags; pc_1: character offsets of each label
                # (-1 when absent).  Missing labels get an empty placeholder.
                b1 = []
                pc_1 = []
                for i in possible_fields_1:
                    if i in str(subnotes):
                        out = True
                    else:
                        out = False
                        missingTuple = (i, '')
                        div_data_1.append(missingTuple)
                    pc_1.append(str(subnotes).find(i))
                    b1.append(out)
                ##accordian
                b2 = []
                pc_2 = []
                for i in possible_fields_2:
                    if i in str(acord):
                        out = True
                    else:
                        out = False
                        missingTuple = (i, '')
                        div_data_1.append(missingTuple)
                    pc_2.append(str(acord).find(i))
                    b2.append(out)
                # Keep only the labels actually present (boolean mask), then
                # order them by their offset within the blob so each label's
                # value runs up to the next label.
                xs = possible_fields_1
                ys = b1
                filtered1 = np.array(xs)[np.array(ys)]
                xs = possible_fields_2
                ys = b2
                filtered2 = np.array(xs)[np.array(ys)]
                no_emps1 = filter(lambda a: a != -1, pc_1)
                no_emps2 = filter(lambda a: a != -1, pc_2)
                aaa = [y for x, y in sorted(zip(no_emps1, filtered1))]
                bbb = [y for x, y in sorted(zip(no_emps2, filtered2))]
                indexer = 0
                filtered1 = aaa
                filtered2 = bbb
                # Extract each label's value as the text between it and the
                # following label ('$$$' forces find_between to run to the end).
                for i in filtered1:
                    first = i
                    try:
                        next = filtered1[indexer + 1]
                    except BaseException as e:
                        next = '$$$'
                    value = find_between(subnotes, first, next)
                    value = value.replace('\n', ' ').strip().replace('\t', ' ')
                    val = (i, value)
                    div_data_1.append(val)
                    indexer += 1
                indexer = 0
                for i in filtered2:
                    first = i
                    try:
                        # NOTE(review): this indexes filtered1 while iterating
                        # filtered2 — almost certainly should be filtered2, so
                        # accordion values may be truncated at wrong boundaries.
                        next = filtered1[indexer + 1]
                    except BaseException as e:
                        next = '$$$'
                    value = find_between(acord, first, next)
                    value = value.replace('\n', ' ').strip().replace('\t', ' ')
                    val = (i, value)
                    div_data_1.append(val)
                    indexer += 1
                bigList = (div_data_1)
                return tuple(bigList)

            # Fetch the first listing page and read the pagination widget.
            URL = base_URL
            web_page = BeautifulSoup(requests.get(URL, {}).text, "lxml")
            pagnation = web_page.find_all('ul', {'class': 'pagination'})[0].find_all('li')
            next_link = (web_page.find_all('li', {'class': 'next'})[0]).find('a', href=True)
            linkky = str(next_link)
            nextPage_ = str("https://archives.cjh.org" + (linkky.split('"')[1]))
            pageList = []
            s_pages = []
            for i in pagnation:
                number = str(i).split('>')[2].split('<')[0]
                pageList.append((number))
            # Keep only numeric page labels to find the last available page.
            test_list = []
            for i in pageList:
                try:
                    test_list.append(int(i))
                except:
                    pass
            last_page__ = (max(test_list))
            __lastPage = page + stop_after_pages
            page_counter = page
            tupleList = []
            # Pass 1: walk the listing pages and collect (name, url, path)
            # for every entry link found in an <h3>.
            for i in range(page, __lastPage):
                row_list = []
                pagez = i
                print("Scraping Archive Index for Entry Links", pagez)
                page_current = page_counter
                URL = str(headless_url + str(i))
                web_page = BeautifulSoup(requests.get(URL, {}).text, "lxml")
                h3s = web_page.find_all('h3')
                for i in h3s:
                    try:
                        link = ((str(i).split('href="')[1]).split('"'))[0]
                        name = (str(i).split('">'))[1].split("</a")[0]
                        data_tuple = (name, str("https://archives.cjh.org" + link), link)
                        tupleList.append(data_tuple)
                    except BaseException as e:
                        pass
                page_counter += 1
            archIndex = pd.DataFrame.from_records(tupleList, columns=['Names', 'Link', 'Location'])
            # ...
            counter = 0
            print("Number of Objects Extracted: ", len(archIndex))
            print("Scraping entry meta data...")
            # Pass 2: fetch each entry's detail page and parse its fields.
            for i in archIndex.itertuples():
                counter += 1
                name = i.Names
                link = i.Link
                link123 = link
                Location = i.Location
                web_page = BeautifulSoup(requests.get(link, {}).text, "lxml")
                record_row = scrape_record(name, link123, web_page, object_type.upper())
                row_list.extend(record_row)
                print("Record: ", counter, link123, name)
            s_pages.extend(row_list)
            # Pivot the flat (field, value) pairs into columns.
            d = {}
            for x, y in s_pages:
                d.setdefault(x, []).append(y)
            df = pd.DataFrame.from_records(d).drop_duplicates()
            if object_type.upper() == 'RECORDS':
                pass
            elif object_type.upper() == 'COLLECTIONS':
                # Merge the two restriction variants into single columns.
                df['Use Terms'] = df['Use Restrictions'] + df['Conditions Governing Use']
                # df1.replace('NA',np.nan,inplace=True)
                df['Access Terms'] = df['Access Restrictions'] + df['Conditions Governing Access']
                dropThese = [
                    'Use Restrictions',
                    'Conditions Governing Use',
                    'Access Restrictions',
                    'Conditions Governing Access',
                ]
                df.drop(columns=dropThese, inplace=True)
            else:
                pass
            return (df.reset_index(drop=True))

        if self.repo.upper() == 'AJHS':
            print('Creating CJHA Scraper Object for AJHS')
            self.meta_df = scrape_all_records(object_type, page_to_start_at, maximum_pages_to_scrape)
            return self.meta_df
        elif self.repo.upper() == 'CUSTOM':
            # NOTE(review): arguments appear misaligned — scrape_all_records
            # expects (object_type, start_page, stop_after_pages, url_override),
            # so page_to_start_at lands in object_type here; also the result is
            # assigned but not returned, unlike the AJHS branch.  Confirm intent.
            self.meta_df = scrape_all_records(page_to_start_at, maximum_pages_to_scrape, self.url)
        else:
            print("Sorry, only AJHS and CUSTOM are currently supported :,(")
            pass
class IPaintTarget(object):
    """This interface defines the methods for painting XML to the UIDL
    stream.

    All methods are abstract: concrete terminals implement them; this base
    class only raises NotImplementedError.

    @author: Vaadin Ltd.
    @author: Richard Lincoln
    @version: 1.1.2
    """

    def addSection(self, sectionTagName, sectionData):
        """Prints a single XML section.

        Prints full XML section. The section data is escaped from XML
        tags and surrounded by XML start and end-tags.

        @param sectionTagName:
                   the name of the tag.
        @param sectionData:
                   the section data.
        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def startTag(self, paintable, tag=None):
        """Prints element start tag of a paintable section. Starts a paintable
        section using the given tag. The IPaintTarget may implement a caching
        scheme, that checks the paintable has actually changed or can a cached
        version be used instead. This method should call the startTag method.

        If the Paintable is found in cache and this function returns true it
        may omit the content and close the tag, in which case cached content
        should be used.

        @param paintable:
                   the paintable to start.
        @param tag:
                   the name of the start tag.
        @return: C{True} if paintable found in cache, C{False} otherwise.
        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def paintReference(self, paintable, referenceName):
        """Paints a component reference as an attribute to current tag. This
        method is meant to enable component interactions on client side. With
        reference the client side component can communicate directly to other
        component.

        Note! This was experimental api and got replaced by L{addAttribute}.

        @param paintable:
                   the Paintable to reference
        @param referenceName:
        @raise PaintException
        @deprecated: use L{addAttribute} or L{addVariable} instead
        """
        raise NotImplementedError

    def endTag(self, tagName):
        """Prints element end tag.

        If the parent tag is closed before every child tag is closed a
        PaintException is raised.

        @param tagName:
                   the name of the end tag.
        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def addAttribute(self, *args):
        """Adds a boolean attribute to component. Attributes must be added
        before any content is written.

        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def addVariable(self, *args):
        """Adds details about L{StreamVariable} to the UIDL stream.

        Eg. in web terminals Receivers are typically rendered for the client
        side as URLs, where the client side implementation can do an http
        post request.

        The urls in UIDL message may use Muntjac specific protocol. Before
        actually using the urls on the client side, they should be passed via
        L{ApplicationConnection.translateMuntjacUri}.

        Note that in current terminal implementation StreamVariables are
        cleaned from the terminal only when:

          - a StreamVariable with same name replaces an old one
          - the variable owner is no more attached
          - the developer signals this by calling
            L{StreamingStartEvent.disposeStreamVariable}

        Most commonly a component developer can just ignore this issue, but
        with strict memory requirements and lots of StreamVariables
        implementations that reserve a lot of memory this may be a critical
        issue.

        @param args: tuple of the form
            - (owner, name, value)
              1. the ReceiverOwner that can track the progress of streaming
                 to the given StreamVariable
              2. an identifying name for the StreamVariable
              3. the StreamVariable to paint
        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def addUploadStreamVariable(self, owner, name):
        """Adds a upload stream type variable.

        @param owner:
                   the Listener for variable changes.
        @param name:
                   the Variable name.
        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def addXMLSection(self, sectionTagName, sectionData, namespace):
        """Prints single XML section.

        Prints full XML section. The section data must be XML and it is
        surrounded by XML start and end-tags.

        @param sectionTagName:
                   the tag name.
        @param sectionData:
                   the section data to be printed.
        @param namespace:
                   the namespace.
        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def addUIDL(self, uidl):
        """Adds UIDL directly. The UIDL must be valid in accordance with
        the UIDL.dtd

        @param uidl:
                   the UIDL to be added.
        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def addText(self, text):
        """Adds text node. All the contents of the text are XML-escaped.

        @param text:
                   the Text to add
        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def addCharacterData(self, text):
        """Adds CDATA node to target UIDL-tree.

        @param text:
                   the Character data to add
        @raise PaintException:
                    if the paint operation failed.
        """
        raise NotImplementedError

    def getTag(self, paintable):
        """@return: the "tag" string used in communication to present given
                    L{IPaintable} type. Terminal may define how to present
                    paintable.
        """
        raise NotImplementedError

    def isFullRepaint(self):
        """@return true if a full repaint has been requested. E.g. refresh
                   in a browser window or such.
        """
        raise NotImplementedError
/OASYS1_HALF_SRW-0.0.3-py3-none-any.whl/orangecontrib/srw/widgets/tools/ow_srw_wavefront_to_wofry.py | from orangewidget import gui
from oasys.widgets import widget
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QApplication, QMessageBox
from PyQt5.QtCore import QRect
from wofry.propagator.wavefront2D.generic_wavefront import GenericWavefront2D
from orangecontrib.srw.util.srw_objects import SRWData
class OWToWofryWavefront2d(widget.OWWidget):
    """Oasys widget that converts an incoming SRW wavefront into a Wofry
    GenericWavefront2D and forwards it on its output channel.
    """

    # Oasys widget registration metadata.
    name = "To Wofry Wavefront 2D"
    id = "toWofryWavefront2D"
    description = "To Wofry Wavefront 2D"
    icon = "icons/to_wofry_wavefront_2d.png"
    priority = 21
    category = ""
    keywords = ["wise", "gaussian"]

    # Input channel: SRWData routed to set_input().
    inputs = [("SRWData", SRWData, "set_input")]

    # Output channel: the converted Wofry wavefront.
    outputs = [{"name":"GenericWavefront2D",
                "type":GenericWavefront2D,
                "doc":"GenericWavefront2D",
                "id":"GenericWavefront2D"}]

    CONTROL_AREA_WIDTH = 605

    # Last SRWData received; None until set_input() is called.
    srw_data = None

    # No plotting area is needed for a pure converter widget.
    want_main_area = 0

    def __init__(self):
        super().__init__()

        # Size and pin the widget near the top-left of the available screen.
        geom = QApplication.desktop().availableGeometry()
        self.setGeometry(QRect(round(geom.width()*0.05),
                               round(geom.height()*0.05),
                               round(min(geom.width()*0.98, self.CONTROL_AREA_WIDTH+10)),
                               round(min(geom.height()*0.95, 100))))

        self.setFixedHeight(self.geometry().height())
        self.setFixedWidth(self.geometry().width())

        self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)

        # Bold/italic caption describing the widget's purpose.
        label = gui.label(self.controlArea, self, "From SRW Wavefront To Wofry Wavefront")
        font = QFont(label.font())
        font.setBold(True)
        font.setItalic(True)
        font.setPixelSize(14)
        label.setFont(font)
        palette = QPalette(label.palette()) # make a copy of the palette
        palette.setColor(QPalette.Foreground, QColor('Dark Blue'))
        label.setPalette(palette) # assign new palette

        gui.separator(self.controlArea, 10)

        gui.button(self.controlArea, self, "Convert", callback=self.convert_wavefront, height=45)

    def set_input(self, input_data):
        """Store the incoming SRWData and immediately convert/forward it."""
        self.setStatusMessage("")

        if not input_data is None:
            self.srw_data = input_data

            self.convert_wavefront()

    def convert_wavefront(self):
        """Convert the stored SRW wavefront to Wofry and send it downstream.

        Any conversion failure is surfaced to the user via a modal dialog.
        """
        try:
            if not self.srw_data is None:
                self.send("GenericWavefront2D", self.srw_data.get_srw_wavefront().toGenericWavefront())
        except Exception as exception:
            QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)

            if self.IS_DEVELOP: raise exception
/FeatureSelectionUsingGA-1.0.0-py3-none-any.whl/fsga.py | import numpy as np
import random
import matplotlib.pyplot as plt
import numpy.random as npr
#Code author: Sriram Ranganathan, Meng. ECE, University of Waterloo
class candidate:
    """A GA candidate solution: a feature-selection bitmask plus its fitness.

    ``bitstream`` is a list of 0/1 flags, one per feature column of the
    training data; ``fitness`` is the validation score assigned later by
    ``Genetic_algorithm.calculate_fitness``.
    """

    def __init__(self, bitstream, fitness = 0.00):
        self.fitness = fitness
        self.bitstream = bitstream

    def __eq__(self, x):
        # Candidates are equal when they select the same feature subset.
        # FIX: the original unconditionally accessed x.bitstream, raising
        # AttributeError when compared to arbitrary objects; returning
        # NotImplemented lets Python fall back to its default comparison.
        if not hasattr(x, "bitstream"):
            return NotImplemented
        return self.bitstream == x.bitstream

    def __repr__(self):
        # Aid debugging: show the score and how many features are selected.
        return f"candidate(fitness={self.fitness}, selected={sum(self.bitstream)})"
class top_solution:
    """Record of a best-so-far solution and the generation it appeared in."""

    def __init__(self, new, iter = 0):
        self.new = new    # the best candidate (or its payload)
        self.iter = iter  # generation index at which it was found
#Code author: Sriram Ranganathan, Meng. ECE, University of Waterloo
class Genetic_algorithm:
    """Genetic algorithm for feature selection over a pandas feature matrix.

    Each candidate carries a 0/1 bitmask over the columns of
    ``input_data_x``; fitness is computed by the user-supplied
    ``fitness_func(X_train, y_train, X_test, y_test)`` on the selected
    column subset.

    Fix applied: ``mutate`` previously contained the bare expression
    ``self.mutation`` (a no-op), so the mutation counter never advanced;
    it is now incremented.  The unused locals ``bs``/``rep`` in
    ``cross_over_mutate`` were removed.
    """

    def __init__(self, input_data_x, input_data_y, max_population, crossover_prob, mutation_r, stop_by_f, stop_fitness, fitness_func):
        self.input_data_x = input_data_x
        self.input_data_y = input_data_y.to_numpy()
        self.max_population = max_population
        self.columns = self.input_data_x.columns
        self.fitness_func = fitness_func
        self.populate()
        self.calculate_fitness()
        self.mating_pool_size = max_population//5
        self.crossover_prob = crossover_prob
        self.mutation_r = mutation_r
        self.Best_solutions = []          # best fitness per generation
        self.Best_solutions_bit = []      # matching best bitstream per generation
        self.Best_iteration = 0
        self.stop_fitness = stop_fitness  # early-stop threshold (used when stop_by_f)
        self.stop_by_f = stop_by_f
        self.fitness_dispersion = []      # per-generation fitness snapshots
        self.len_bitstream_dispersion = []  # per-generation selected-feature counts

    def train_test_split(self, train_x, train_y, test_size):
        """Randomly shuffle (x, y) pairs and split off a test fraction."""
        data = list(zip(train_x, train_y)) # Combine train_x and train_y
        random.shuffle(data) # Shuffle the data randomly
        test_size = int(len(train_x)*test_size)
        split_index = len(data) - test_size
        train_data = data[:split_index]
        test_data = data[split_index:]
        train_x, train_y = zip(*train_data) # Unzip the train_data
        test_x, test_y = zip(*test_data) # Unzip the test_data
        return train_x, train_y, test_x, test_y

    def evolve(self, no_iters):
        """Run the GA for *no_iters* generations.

        Returns (best_fitness, columns_to_keep) on normal completion, or the
        top candidate object if the early-stop fitness threshold is crossed.
        """
        print("Genetic Algorithm Evolving")
        i = 0
        self.average = []
        self.Top_sols = []
        self.worst_sols = []
        self.tot_crossov = []
        self.tot_mut = []
        l = range(0,no_iters)
        self.crossover = 0
        self.mutation = 0
        for i in l:
            top_sol = self.current_population[0]
            self.Best_solutions.append(top_sol.fitness)
            self.Best_solutions_bit.append(top_sol.bitstream)
            print("Top solution fitness "+ str(top_sol.fitness))
            print("Iteration_No: ", i)
            fitness = [m.fitness for m in self.current_population]
            self.average.append(sum(fitness)/len(fitness))
            self.Top_sols.append(max(fitness))
            self.worst_sols.append(min(fitness))
            if ((top_sol.fitness > self.stop_fitness) & (self.stop_by_f)):
                return top_sol
            self.current_population = self.cross_over_mutate(self.current_population)
            self.calculate_fitness()
            i+=1
            self.current_population.sort(key=lambda x: x.fitness, reverse=True)
            self.tot_crossov.append(self.crossover)
            self.tot_mut.append(self.mutation)
        best_ind = np.argmax(self.Best_solutions)
        best_bit_stream = np.array(self.Best_solutions_bit[best_ind])
        columns_to_keep = np.where(best_bit_stream == 1)[0].tolist()
        return max(self.Best_solutions), columns_to_keep

    def populate(self, initial = False):
        """Seed the population with random bitmasks (each bit ~50% set)."""
        print("Creating Initial population")
        self.current_population = []
        for i in range(0,self.max_population):
            bitstream = []
            for i in self.input_data_x.columns:
                if random.randrange(10)<=5:
                    bitstream.append(1)
                else:
                    bitstream.append(0)
            new_cand = candidate(bitstream)
            # Skip duplicates so the initial population is unique.
            rep = False
            for i in self.current_population:
                if bitstream == i.bitstream:
                    rep = True
                    break
            if rep == True:
                continue
            self.current_population.append(new_cand)
        return

    def calculate_fitness(self):
        """Score every candidate with fitness_func on its column subset."""
        print("Calculating fitness")
        for i in self.current_population:
            new_data_frame = self.input_data_x
            bitstream = i.bitstream
            # Columns whose bit is 0 are dropped before training.
            drop_columns = []
            for k in range(0,len(bitstream)):
                if bitstream[k] == 1:
                    continue
                if bitstream[k] == 0:
                    drop_columns.append(self.columns[k])
            new_data_frame = self.input_data_x.drop(drop_columns, axis = 1)
            Train_x = new_data_frame.to_numpy()
            X_train, y_train ,X_test, y_test = self.train_test_split(Train_x, self.input_data_y, 0.2)
            i.fitness = self.fitness_func(X_train, y_train, X_test, y_test, )
        return

    def roulette_select_one(self, c_population):
        """Fitness-proportionate (roulette-wheel) selection of one candidate."""
        max = sum([f.fitness for f in c_population])
        probs = [f.fitness/max for f in c_population]
        return c_population[npr.choice(len(c_population), p=probs)]

    def cross_over_mutate(self, current_population):
        """Produce the next generation: elitism (top 2) + crossover + mutation."""
        # Record dispersion statistics for this generation.
        self.fitness_dispersion.append([f.fitness for f in self.current_population])
        y = [np.array(f.bitstream) for f in self.current_population]
        y = [np.sum(l) for l in y]
        self.len_bitstream_dispersion.append(y)
        current_population.sort(key=lambda x: x.fitness, reverse=True)
        # Elitism: carry the two fittest candidates over unchanged.
        new_population = current_population[0:2]
        print("Top 2 Fitness of new population", new_population[0].fitness, new_population[1].fitness)
        mating_pool = current_population[:self.mating_pool_size].copy()
        m = 0
        while(len(new_population)<len(current_population)):
            n = m
            if m>=len(mating_pool):
                n = m%len(mating_pool)
            p1 = self.roulette_select_one(mating_pool)
            new_mating_pool = mating_pool.copy()
            new_mating_pool.pop(n)
            # NOTE(review): when the crossover draw fails, new_cand keeps an
            # empty bitstream (equivalent to selecting all features after
            # calculate_fitness's drop loop) — confirm this is intended.
            new_cand = candidate([], 0)
            if random.uniform(0, 1)<=self.crossover_prob:
                self.crossover+=1
                p2 = self.roulette_select_one(mating_pool)
                trait_split = random.randrange(self.input_data_x.shape[1])
                L = [k for k in range(trait_split,self.input_data_x.shape[1])]
                trait_split1 = random.choice(L)
                # NOTE(review): the first two segments both come from p1, so
                # this reduces to a single-point crossover at trait_split1;
                # two-point crossover would take the middle segment from p2.
                new_bitstream = self.mutate(p1.bitstream[0:trait_split] + p1.bitstream[trait_split:trait_split1]+p2.bitstream[trait_split1:])
                new_cand.bitstream = new_bitstream
            new_population.append(new_cand)
            m+=1
        current_population = new_population.copy()
        current_population.sort(key=lambda x: x.fitness, reverse=True)
        return current_population

    def mutate(self, bitstream):
        """Flip each bit independently with probability mutation_r (in place)."""
        for i in range(0,len(bitstream)):
            if random.uniform(0, 1)<=self.mutation_r:
                self.mutation += 1  # FIX: was the no-op expression `self.mutation`
                if bitstream[i]==0:
                    bitstream[i] = 1
                else:
                    bitstream[i] = 0
        return bitstream

    def plot(self, path):
        """Save a matplotlib plot of avg/max/min fitness per generation to *path*.

        Requires ``matplotlib.pyplot`` (imported at module level as ``plt``)
        and a prior call to evolve().
        """
        try:
            plt.plot(range(0,len(self.Top_sols)),self.average, label = "Avg Fitness")
            plt.plot(range(0,len(self.Top_sols)),self.Top_sols, label = "Max Fitness")
            plt.plot(range(0,len(self.Top_sols)),self.worst_sols, label = "Min Fitness")
            plt.xlabel('Generations')
            plt.ylabel('Validation Accuracy from solutions(fitness)')
            plt.legend(loc="lower right")
            # displaying the title
            plt.title("White wine")
            plt.savefig(path)
        except:
            print("Please evolve first")
        return
/ModelSEEDpy-freiburgermsu-0.3.1.4.tar.gz/ModelSEEDpy-freiburgermsu-0.3.1.4/modelseedpy_freiburgermsu/community/commhelper.py | from modelseedpy_freiburgermsu.core.msminimalmedia import minimizeFlux_withGrowth, bioFlux_check
from modelseedpy_freiburgermsu.core.exceptions import NoFluxError, ObjectiveError
from modelseedpy_freiburgermsu.community.mscompatibility import MSCompatibility
from modelseedpy_freiburgermsu.core.msmodelutl import MSModelUtil
from modelseedpy_freiburgermsu.core.fbahelper import FBAHelper
from cobra import Model, Reaction, Metabolite
from cobra.medium import minimal_medium
from cobra.flux_analysis import pfba
from collections import OrderedDict
from optlang.symbolics import Zero
from optlang import Constraint
from math import inf, isclose
from pandas import DataFrame
from pprint import pprint
from numpy import mean
import re
def strip_comp(ID):
    """Normalise a ModelSEED-style identifier.

    Dashes are swapped for tildes and every ``_<word-char><digit>``
    compartment token (e.g. ``_c0``, ``_e0``) is removed.
    """
    return re.sub(r"(\_\w\d)", "", ID.replace("-", "~"))
def export_lp(model, name):
    """Dump *model*'s solver state to ``<name>.lp``."""
    lp_text = model.solver.to_lp()
    with open(f"{name}.lp", 'w') as handle:
        handle.write(lp_text)
def correct_nonMSID(nonMSobject, output, model_index):
    """Assign an indexed compartment to a non-ModelSEED object and return its
    corrected ID.

    *output* is the (name, compartment) pair previously parsed from the
    object's ID; extracellular objects ("e") always get index 0, everything
    else gets *model_index*.  The object's ``compartment`` attribute is
    updated in place as a side effect.
    """
    name, compartment = output
    suffix_index = 0 if compartment == "e" else model_index
    nonMSobject.compartment = f"{compartment}{suffix_index}"
    match = re.search(r"(_\w\d+$)", nonMSobject.id)
    if match is None:
        # No trailing compartment token: swap a bracketed tag like "[c]"
        # for the new "_c<index>" suffix.
        return nonMSobject.id.replace(f"[{compartment}]", f"_{nonMSobject.compartment}")
    # Replace the existing trailing token with the newly indexed compartment.
    stem = nonMSobject.id.replace(match.group(), "")
    return f"{stem}_{nonMSobject.compartment}"
def build_from_species_models(org_models, model_id=None, name=None, abundances=None,
                              standardize=False, copy_models=True, printing=False):
    """Merges the input list of single species metabolic models into a community metabolic model

    Parameters
    ----------
    org_models : list<Cobra.Model> to be merged into a community model
    model_id : string specifying community model ID
    name : string specifying community model name
    abundances : dict<string,float> relative abundances for input models in community model,
        keyed by member model ID; when None, members are weighted equally
    standardize : bool for whether the exchanges of each member model will be standardized
        (True) or just aligned
    copy_models : bool for whether member models are copied before renaming (True) or
        mutated in place (False)
    printing : bool for verbose diagnostic output while renaming

    Returns
    -------
    Cobra.Model for the desired Community

    Raises
    ------
    """
    # construct the new model
    models = org_models if not standardize else MSCompatibility.standardize(
        org_models, exchanges=True, conflicts_file_name='exchanges_conflicts.json')
    biomass_indices = []
    biomass_index = minimal_biomass_index = 2
    new_metabolites, new_reactions = set(), set()
    member_biomasses = {}
    for model_index, org_model in enumerate(models):
        model_util = MSModelUtil(org_model, copy=copy_models)
        model_reaction_ids = [rxn.id for rxn in model_util.model.reactions]
        # Member compartments are 1-based; index 0 is reserved for the shared
        # extracellular compartment.
        model_index += 1
        # Rename metabolites
        for met in model_util.model.metabolites:
            # Renaming compartments
            output = MSModelUtil.parse_id(met)
            if printing: print(met, output)
            if output is None:
                if printing: print(f"The {met.id} ({output}; {hasattr(met, 'compartment')}) is unpredictable.")
                # Unparseable ID: assume cytosolic and patch it manually.
                met.id = correct_nonMSID(met, (met.id, "c"), model_index)
            elif len(output) == 2: met.id = correct_nonMSID(met, output, model_index)
            elif len(output) == 3:
                name, compartment, out_index = output
                index = 0 if compartment == "e" else model_index
                if out_index == "":
                    met.id += str(index)
                    met.compartment += str(index)
                elif compartment == "e": met.compartment = "e0"
                else:
                    met.compartment = compartment + str(index)
                    met.id = name + "_" + met.compartment
            new_metabolites.add(met)
            # Track each member's biomass metabolite for the community biomass.
            if "cpd11416_c" in met.id or "biomass" in met.id: member_biomasses[org_model.id] = met
        # Rename reactions
        for rxn in model_util.model.reactions: # !!! all reactions should have a non-zero compartment index
            if rxn.id[0:3] != "EX_":
                ## biomass reactions
                if re.search('^(bio)(\d+)$', rxn.id):
                    index = int(re.sub(r"(^bio)", "", rxn.id))
                    if biomass_index == 2:
                        # Find the first unused bioN index in this member.
                        while f"bio{biomass_index}" in model_reaction_ids: biomass_index += 1
                    if index not in biomass_indices and index >= minimal_biomass_index: biomass_indices.append(index)
                    else: # biomass indices can be decoupled from the respective reaction indices of the same model
                        rxn.id = "bio" + str(biomass_index)
                        if rxn.id not in model_reaction_ids: biomass_indices.append(biomass_index)
                        else:
                            index = minimal_biomass_index
                            rxn.id = "bio" + str(index)
                            while rxn.id not in model_reaction_ids and index not in biomass_indices:
                                index += 1
                                rxn.id = "bio" + str(index)
                            biomass_indices.append(index)
                    biomass_index += 1
                ## non-biomass reactions
                else:
                    initialID = str(rxn.id)
                    output = MSModelUtil.parse_id(rxn)
                    if output is None:
                        if printing: print(f"The {rxn.id} ({output}; {hasattr(rxn, 'compartment')}) is unpredictable.")
                        try:
                            rxn.id = correct_nonMSID(rxn, (rxn.id, "c"), model_index)
                        except ValueError: pass
                    elif len(output) == 2: rxn.id = correct_nonMSID(rxn, output, model_index)
                    elif len(output) == 3:
                        name, compartment, index = output
                        if compartment != "e":
                            rxn.name = f"{name}_{compartment}{model_index}"
                            rxn_id = re.search(r"(.+\_\w)(?=\d+)", rxn.id).group()
                            if index == "": rxn.id += str(model_index)
                            else: rxn.id = rxn_id + str(model_index)
                    finalID = str(rxn.id)
                    # Warn when renaming changed more than the numeric suffix.
                    string_diff = set(initialID).symmetric_difference(set(finalID))
                    if string_diff and not all(FBAHelper.isnumber(x) for x in string_diff):
                        print(f"The ID {initialID} is changed with {string_diff} to create the final ID {finalID}")
            new_reactions.add(rxn)
    # adds only unique reactions and metabolites to the community model
    newmodel = Model(model_id or "+".join([model.id for model in models]),
                     name or "+".join([model.name for model in models]))
    newmodel.add_reactions(FBAHelper.filter_cobra_set(new_reactions))
    newmodel.add_metabolites(FBAHelper.filter_cobra_set(new_metabolites))

    # Create community biomass
    comm_biomass = Metabolite("cpd11416_c0", None, "Community biomass", 0, "c0")
    metabolites = {comm_biomass: 1}
    # NOTE(review): the explicit-abundance branch does not negate the
    # coefficients, unlike the equal-abundance branch below — confirm the
    # intended stoichiometric sign for member biomass consumption.
    if abundances: abundances = {met: abundances[memberID] for memberID, met in member_biomasses.items()}
    else: abundances = {cpd: -1 / len(member_biomasses) for cpd in member_biomasses.values()}
    metabolites.update(abundances)
    comm_biorxn = Reaction(id="bio1", name="bio1", lower_bound=0, upper_bound=1000)
    comm_biorxn.add_metabolites(metabolites)
    newmodel.add_reactions([comm_biorxn])

    # update model components
    newutl = MSModelUtil(newmodel)
    newutl.add_objective(comm_biorxn.flux_expression)
    newmodel.add_boundary(comm_biomass, "sink") # Is a sink reaction for reversible cpd11416_c0 consumption necessary?
    # NOTE(review): the attribute checked is `_context` but the attribute
    # appended to is `_contents` — verify this mismatch is intentional.
    if hasattr(newutl.model, "_context"): newutl.model._contents.append(member_biomasses)
    elif hasattr(newutl.model, "notes"): newutl.model.notes.update(member_biomasses)
    return newutl.model
def phenotypes(community_members, phenotype_flux_threshold=.1, solver:str="glpk"):
    """Compute single-carbon-source phenotype flux profiles for every community member.

    For each member model, each candidate phenotype carbon source (the member's
    declared ``phenotypes`` or, when absent, every carbon exchange) is simulated
    in isolation: oxygen uptake is tied to the phenotype source, influx of other
    carbon exchanges is minimized at a fixed growth, the phenotype yield is then
    maximized, byproduct excretion is maximized, and a final pFBA solution is
    stored per phenotype.

    :param community_members: dict mapping member models to a content dict with
        at least ``name`` and optionally a ``phenotypes`` mapping.
    :param phenotype_flux_threshold: minimum excretion flux for a compound to be
        recorded as a phenotype byproduct.
    :param solver: LP solver name assigned to every model.
    :return: tuple of (DataFrame of exchange fluxes with one column per member
        phenotype, updated copy of ``community_members``).
    """
    # log information of each respective model
    models = OrderedDict()
    solutions = []
    media_conc = set()  # NOTE(review): never populated in this function — appears vestigial
    # calculate all phenotype profiles for all members
    comm_members = community_members.copy()
    # print(community_members)
    for org_model, content in community_members.items(): # community_members excludes the stationary phenotype
        print("\n", org_model.id)
        org_model.solver = solver
        all_phenotypes = "phenotypes" not in content
        model_util = MSModelUtil(org_model, True)
        # NOTE(review): the locals() guard means org_coef captures the O2
        # exchange of the *first* member only and is reused for all members —
        # confirm this is intentional.
        if "org_coef" not in locals():
            org_coef = {model_util.model.reactions.get_by_id("EX_cpd00007_e0").reverse_variable: -1}
        model_util.standard_exchanges()
        models[org_model.id] = {"exchanges": model_util.exchange_list(), "solutions": {}, "name": content["name"]}
        phenotypes = {met.name: {"consumed": met.id.replace("EX_", "").replace("_e0", "")}
                      for met in model_util.carbon_exchange_mets_list(include_unknown=False)
                      } if all_phenotypes else content["phenotypes"]
        # print(phenotypes)
        # NOTE(review): the list below collects dict_keys *views* (not the key
        # strings themselves) and rebinds the loop names member/content — this
        # looks unintended; verify against downstream consumers.
        models[org_model.id]["phenotypes"] = ["stationary"] + [
            content["phenotypes"].keys() for member, content in comm_members.items()]
        # NOTE(review): raises KeyError when all_phenotypes is True, since
        # content has no "phenotypes" key in that case — confirm.
        phenoRXNs = [pheno_cpd for pheno, pheno_cpds in content['phenotypes'].items()
                     for pheno_cpd in pheno_cpds["consumed"]]
        media = {cpd: 100 for cpd, flux in model_util.model.medium.items()}
        #TODO correct or remove the media, since it seems to be overwritten by the optimization of all carbon exchanges
        ### eliminate hydrogen absorption
        media.update({"EX_cpd11640_e0": 0})
        past_phenoRXNs = []
        for name, phenoCPDs in phenotypes.items():
            pheno_util = MSModelUtil(model_util.model, True)
            metID = phenoCPDs["consumed"][0]
            try:
                phenoRXN = pheno_util.model.reactions.get_by_id(f'EX_{metID}_e0')
                if past_phenoRXNs:
                    del media[past_phenoRXNs[-1]]
            except Exception as e:
                print(e, f'\nEX_{metID}_e0 is not in the model {org_model.id}')
                continue
            media.update({phenoRXN.id: 100})
            pheno_util.add_medium(media)
            print(phenoRXN.id)
            pheno_util.model.solver = solver
            ### define an oxygen absorption relative to the phenotype carbon source
            # O2_consumption: EX_cpd00007_e0 <= phenotype carbon source   # formerly <= 2 * sum(primary carbon fluxes)
            coef = org_coef.copy()
            coef.update({phenoRXN.reverse_variable: 1})
            pheno_util.create_constraint(Constraint(Zero, lb=0, ub=None, name="EX_cpd00007_e0_limitation"), coef=coef)
            ## minimize the influx of all carbonaceous exchanges, mostly non-phenotype compounds, at a fixed biomass growth
            min_growth = float(1) # arbitrarily assigned minimal growth
            pheno_util.add_minimal_objective_cons(min_growth)
            phenoRXN.upper_bound = 0
            for ex in pheno_util.carbon_exchange_list():
                exMet = ex.id.replace("EX_", "").replace("_e0", "")
                if exMet in phenoRXNs and exMet != metID: ex.lower_bound = 0
                # print(f"The new bounds of {exMet} exchange are: {ex.bounds}")
            pheno_util.add_objective(Zero, "min", coef={
                ex.reverse_variable: 1000 if ex.id != phenoRXN.id else 1
                for ex in pheno_util.carbon_exchange_list()})
            # export_lp(pheno_util.model, f"minimize_cInFlux_{phenoRXN.id}")
            sol = pheno_util.model.optimize()
            if sol.status != "optimal":
                # Relax the O2 limitation (5x the carbon source) and retry.
                pheno_util.model.remove_cons_vars(["EX_cpd00007_e0_limitation"])
                coef.update({phenoRXN.reverse_variable: 5})
                pheno_util.create_constraint(
                    Constraint(Zero, lb=0, ub=None, name="EX_cpd00007_e0_limitation"), coef=coef)
                sol = pheno_util.model.optimize()
            bioFlux_check(pheno_util.model, sol)
            ### limit maximum consumption to the values from the previous minimization
            for ex in pheno_util.carbon_exchange_list():
                #### (limiting the reverse_variable is more restrictive than the net flux variable)
                if ex.id != phenoRXN.id: ex.reverse_variable.ub = abs(min(0, sol.fluxes[ex.id]))
            ## maximize the phenotype yield with the previously defined growth and constraints
            pheno_util.add_objective(phenoRXN.reverse_variable, "min")
            # export_lp(pheno_util.model, f"maximize_phenoYield_{phenoRXN.id}")
            pheno_sol = pheno_util.model.optimize()
            bioFlux_check(pheno_util.model, pheno_sol)
            pheno_influx = pheno_sol.fluxes[phenoRXN.id]
            # A non-negative net flux means the carbon source was not consumed:
            # fatal for declared phenotypes, merely skipped when enumerating all.
            if pheno_influx >= 0:
                if not all_phenotypes:
                    print(f"The phenotype carbon source has a flux of {pheno_sol.fluxes[phenoRXN.id]}.")
                    pprint({rxn: flux for rxn, flux in pheno_sol.fluxes.items() if flux != 0})
                    # TODO gapfill the model in media the non-functioning carbon source
                    raise NoFluxError(f"The (+) net flux of {pheno_influx} for the {phenoRXN.id} phenotype"
                                      f" indicates that it is an implausible phenotype.")
                print(f"NoFluxError: The (+) net flux of {pheno_influx} for the {phenoRXN.id}"
                      " phenotype indicates that it is an implausible phenotype.")
                continue
            phenoRXN.lower_bound = phenoRXN.upper_bound = pheno_influx
            ## maximize excretion of all potential carbon byproducts whose #C's < phenotype source #C's
            phenotype_source_carbons = FBAHelper.rxn_mets_list(phenoRXN)[0].elements["C"]
            minimum_fluxes = {}
            for carbon_source in pheno_util.carbon_exchange_list(include_unknown=False):
                if 0 < FBAHelper.rxn_mets_list(carbon_source)[0].elements["C"] < phenotype_source_carbons:
                    pheno_util.add_objective(carbon_source.flux_expression, "max")
                    minObj = pheno_util.model.slim_optimize()
                    # print(carbon_source.reaction, "\t", carbon_source.flux_expression, "\t", minObj)
                    if minObj > phenotype_flux_threshold:
                        minimum_fluxes[carbon_source.id] = minObj
            # TODO limit the possible excreted compounds to only those that are defined in the media
            excreted_compounds = list([exID for exID in minimum_fluxes.keys() if exID != "EX_cpd00011_e0"])
            # minimum_fluxes_df = DataFrame(data=list(minimum_fluxes.values()), index=excreted_compounds, columns=["min_flux"])
            # max_excretion_cpd = minimum_fluxes_df["minimum"].idxmin()
            ### optimize the excretion of the discovered phenotype excreta
            if "excreted" in phenoCPDs:
                phenoCPDs["excreted"] = [f"EX_{cpd}_e0" for cpd in phenoCPDs["excreted"]]
                phenoCPDs["excreted"].extend(excreted_compounds)
            else: phenoCPDs["excreted"] = excreted_compounds
            pheno_excreta = [pheno_util.model.reactions.get_by_id(excreta)
                             for excreta in phenoCPDs["excreted"]]
            pheno_util.add_objective(sum([ex.flux_expression for ex in pheno_excreta]), "max")
            # export_lp(pheno_util.model, "maximize_excreta")
            sol = pheno_util.model.optimize()
            bioFlux_check(pheno_util.model, sol)
            # Pin the excreta at their optimized fluxes before the pFBA pass.
            for ex in pheno_excreta:
                ex.lower_bound = ex.upper_bound = sol.fluxes[ex.id]
            ## minimize flux of the total simulation flux through pFBA
            # TODO discover why some phenotypes are infeasible with pFBA
            try: pheno_sol = pfba(pheno_util.model)
            # pheno_util.add_objective(sum([rxn.flux_expression for rxn in pheno_util.e]), "min")
            # pheno_sol = pheno_util.model.optimize()
            except Exception as e:
                print(f"The {phenoRXN.id} phenotype of the {pheno_util.model} model is "
                      f"unable to be simulated with pFBA and yields a < {e} > error.")
            sol_dict = FBAHelper.solution_to_variables_dict(pheno_sol, pheno_util.model)
            simulated_growth = sum([flux for var, flux in sol_dict.items() if re.search(r"(^bio\d+$)", var.name)])
            if not isclose(simulated_growth, min_growth):
                display([(rxn, flux) for rxn, flux in pheno_sol.fluxes.items() if "EX_" in rxn and flux != 0])
                raise ObjectiveError(f"The assigned minimal_growth of {min_growth} was not optimized"
                                     f" during the simulation, where the observed growth was {simulated_growth}.")
            ## store solution fluxes and update the community_members phenotypes
            met_name = strip_comp(name).replace(" ", "-")
            col = content["name"] + '_' + met_name
            models[pheno_util.model.id]["solutions"][col] = pheno_sol
            solutions.append(models[pheno_util.model.id]["solutions"][col].objective_value)
            met_name = met_name.replace("_", "-").replace("~", "-")
            if all_phenotypes:
                if "phenotypes" not in comm_members[org_model]:
                    comm_members[org_model]["phenotypes"] = {met_name: {"consumed": [strip_comp(metID)]}}
                if met_name not in comm_members[org_model]["phenotypes"]:
                    comm_members[org_model]["phenotypes"].update({met_name: {"consumed": [strip_comp(metID)]}})
                else: comm_members[org_model]["phenotypes"][met_name]["consumed"] = [strip_comp(metID)]
                # NOTE(review): reads content["phenotypes"] in the
                # all_phenotypes branch, where that key may not exist — confirm.
                met_pheno = content["phenotypes"][met_name]
                if "excreted" in met_pheno and strip_comp(metID) in met_pheno["excreted"]:
                    comm_members[org_model]["phenotypes"][met_name].update({"excreted": met_pheno})
            past_phenoRXNs.append(phenoRXN.id)
    # construct the parsed table of all exchange fluxes for each phenotype
    cols = {}
    ## biomass row
    cols["rxn"] = ["bio"]
    for content in models.values():
        for col in content["solutions"]:
            cols[col] = [0]
            # NOTE(review): this guard is dead code — col always comes from
            # content["solutions"] in the line above.
            if col not in content["solutions"]: continue
            bio_rxns = [x for x in content["solutions"][col].fluxes.index if "bio" in x]
            flux = mean([content["solutions"][col].fluxes[rxn] for rxn in bio_rxns
                         if content["solutions"][col].fluxes[rxn] != 0])
            cols[col] = [flux]
    ## exchange reactions rows
    looped_cols = cols.copy()
    looped_cols.pop("rxn")
    for content in models.values():
        for ex_rxn in content["exchanges"]:
            cols["rxn"].append(ex_rxn.id)
            for col in looped_cols:
                ### reactions that are not present in the columns are ignored
                flux = 0 if (col not in content["solutions"] or
                             ex_rxn.id not in list(content["solutions"][col].fluxes.index)
                             ) else content["solutions"][col].fluxes[ex_rxn.id]
                cols[col].append(flux)
    ## construct the DataFrame
    fluxes_df = DataFrame(data=cols)
    fluxes_df.index = fluxes_df['rxn']
    fluxes_df.drop('rxn', axis=1, inplace=True)
    fluxes_df = fluxes_df.groupby(fluxes_df.index).sum()
    fluxes_df = fluxes_df.loc[(fluxes_df != 0).any(axis=1)]
    # NOTE(review): astype returns a new frame; this result is discarded, so
    # the cast has no effect — confirm whether it should be assigned.
    fluxes_df.astype(str)
    # fluxes_df.to_csv("fluxes.csv")
    return fluxes_df, comm_members
/AMFM_decompy-1.0.11.tar.gz/AMFM_decompy-1.0.11/amfm_decompy/pyQHM.py | import numpy as np
import scipy
"""
--------------------------------------------
Classes.
--------------------------------------------
"""
"""
Creates a single component object.
"""
class ComponentObj(object):
    """A single modulated sinusoidal component.

    Holds *views* (not copies) into row ``harm`` of the shared parameter
    array ``H``, so any later update of H is reflected here.
    """

    def __init__(self, H, harm):
        # Axis 1 of H holds: 0 -> magnitude, 1 -> phase, 2 -> frequency.
        self.mag, self.phase, self.freq = (H[harm, i, :] for i in range(3))

    def synthesize(self):
        """Build the time-domain component from its magnitude and phase."""
        self.signal = 2*self.mag*np.cos(self.phase)
"""
Creates the output signal object (which, in its turn, is formed by n_harm
modulated components).
"""
class ModulatedSign(object):
    """The output signal object, formed by n_harm modulated components.

    The 3-D array ``H`` stores the parameters of every component: axis 0
    indexes the n_harm components, axis 1 the parameter (0 magnitude,
    1 phase, 2 frequency) and axis 2 the temporal axis.
    """

    def __init__(self, n_harm, file_size, fs, phase_tech='phase'):
        self.n_harm = n_harm
        self.size = file_size
        self.fs = fs
        self.H = np.zeros((self.n_harm, 3, self.size))
        # Each ComponentObj keeps views into H, so updates to H propagate.
        self.harmonics = [ComponentObj(self.H, i) for i in range(self.n_harm)]
        self.error = np.zeros(self.size)
        self.phase_tech = phase_tech

    def update_values(self, a, freq, frame):
        """Store magnitude, phase and frequency of all components at *frame*.

        :param a: complex amplitudes (one per component).
        :param freq: normalized frequencies (one per component).
        :param frame: temporal index to update.
        """
        self.H[:, 0, frame] = np.abs(a)
        self.H[:, 1, frame] = np.angle(a)
        self.H[:, 2, frame] = freq

    def interpolate_samp(self, samp_frames, pitch_track):
        """Interpolate the parameters when extraction was not sample-by-sample.

        Magnitude is interpolated linearly and frequency cubically; the phase
        is rebuilt from the cumulative frequency plus a smooth sinusoidal
        correction, following references [1,2].
        """
        # Interpolation from magnitude and frequency.
        for idx, func in [(0, 'linear'), (2, 'cubic')]:
            f = scipy.interpolate.interp1d(samp_frames,
                                           self.H[:, idx, samp_frames], kind=func)
            self.H[:, idx, np.nonzero(pitch_track)[0]] = f(
                np.nonzero(pitch_track)[0])
        # Interpolation from phase.
        step = samp_frames[1]-samp_frames[0]
        sin_f = np.cumsum(np.sin(np.pi*np.arange(1, step)/step)).reshape(
            1, step-1)
        for idx, frame in np.ndenumerate(samp_frames[1:]):
            if frame-samp_frames[idx] <= step:
                # Naive phase from integrated frequency; M unwraps it onto the
                # branch closest to the measured phase at the frame end.
                cum_phase = np.cumsum(self.H[:, 2, samp_frames[idx]+1:frame+1],
                                      axis=1)*2*np.pi
                bad_phase = cum_phase[:, -1]+self.H[:, 1, samp_frames[idx]]
                M = np.around(np.abs(self.H[:, 1, frame]-bad_phase)/(2*np.pi))
                if frame-samp_frames[idx] < step:
                    end_step = frame-samp_frames[idx]
                    func = np.cumsum(np.sin(np.pi*np.arange(1, end_step) /
                                            end_step)).reshape(1, end_step-1)
                else:
                    func = sin_f
                r_vec = (np.pi*(self.H[:, 1, frame]+2*np.pi*M-bad_phase) /
                         (2*(frame-samp_frames[idx]))).reshape(self.n_harm, 1)
                new_phase = cum_phase[:, :-1]+r_vec*func + \
                    self.H[:, 1, samp_frames[idx]].reshape(self.n_harm, 1)
                # Wrap back into (-pi, pi].
                self.H[:, 1, samp_frames[idx]+1:frame] = ((new_phase + np.pi) %
                                                          (2*np.pi)-np.pi)

    def synthesize(self, N=None):
        """Synthesize the first N components (default: all n_harm) and sum
        them into ``self.signal``."""
        if N is None:
            N = self.n_harm
        for i in range(N):
            self.harmonics[i].synthesize()
        # Sum only the components synthesized above. The previous version
        # summed all n_harm components, which raised AttributeError whenever
        # N < n_harm (components beyond N had no .signal yet).
        self.signal = sum(self.harmonics[i].signal for i in range(N))

    def srer(self, orig_signal, pitch_track):
        """Signal-to-Reconstruction Error Ratio (in dB) over voiced samples."""
        self.SRER = 20*np.log10(np.std(orig_signal[np.nonzero(pitch_track)[0]]) /
                                np.std(orig_signal[np.nonzero(pitch_track)[0]] -
                                       self.signal[np.nonzero(pitch_track)[0]]))

    def phase_edges(self, edges, window):
        """Extrapolate the phase at the borders of the voiced frames by
        integrating the edge frequency value (needed by later aQHM steps).

        Depending on ``phase_tech``, the basis is the extracted phase itself
        ('phase') or the cumulative frequency ('freq'); the latter provides a
        smoother basis for aQHM/eaQHM in sample-by-sample extraction.
        """
        # Select whether the phase itself or the cumulative frequency is used.
        # ('==' here, not 'is': identity comparison of strings is unreliable.)
        if self.phase_tech == 'phase':
            self.extrap_phase = np.unwrap(self.H[:, 1, :])
        elif self.phase_tech == 'freq':
            delta_phase = self.H[:, 1, edges[0]+1] - \
                self.H[:, 2, edges[0]+1]*2*np.pi
            self.extrap_phase = np.cumsum(self.H[:, 2, :], axis=1)*2*np.pi + \
                delta_phase.reshape(self.n_harm, 1)
        # Extrapolate the phase edges by linear continuation of the edge
        # frequency over half a window on each side.
        n_beg = -window.half_len_vec[::-1][:-1].reshape(1, window.N)
        n_end = window.half_len_vec[1:].reshape(1, window.N)
        for beg, end in zip(edges[::2], edges[1::2]):
            old_phase = self.extrap_phase[:, beg+1].reshape(self.n_harm, 1)
            freq = self.H[:, 2, beg+1].reshape(self.n_harm, 1)
            self.extrap_phase[:, beg-window.N+1:beg+1] = \
                2*np.pi*freq*n_beg+old_phase
            old_phase = self.extrap_phase[:, end].reshape(self.n_harm, 1)
            freq = self.H[:, 2, end].reshape(self.n_harm, 1)
            self.extrap_phase[:, end+1:end+window.N+1] = \
                2*np.pi*freq*n_end+old_phase
"""
Creates the sample window object.
"""
class SampleWindow(object):
    """Hamming analysis window of a given duration in seconds.

    Precomputes the window samples, their squares, the sample-index vectors
    and the Hamming spectral coefficients used by the R_eq computations.
    """

    def __init__(self, window_duration, fs):
        self.dur = window_duration  # in seconds
        length = int(self.dur*fs + 1)
        # Force an odd number of samples so the window has a center sample.
        if length % 2 == 0:
            length -= 1
        self.length = length
        self.data = np.hamming(length)
        self.data2 = self.data**2
        self.N = int(self.dur*fs/2)
        self.half_len_vec = np.arange(self.N + 1)
        self.len_vec = np.arange(-self.N, self.N + 1)
        # Coefficients of the squared Hamming window (0.54 + 0.46*cos) spectrum.
        self.a0 = 0.54**2 + (0.46**2)/2
        self.a1 = 0.54*0.46
        self.a2 = (0.46**2)/4
        self.R0_diag = R_eq(0, g0, self)
        self.R2_diag = sum(self.data2*(self.len_vec**2))
"""
--------------------------------------------
Main Functions.
--------------------------------------------
"""
"""
Main QHM function.
"""
def qhm(signal, pitch, window, samp_jump=None, N_iter=1, phase_tech='phase'):
    """Extract the harmonic model of *signal* with the QHM algorithm.

    Thin wrapper that runs :func:`HM_run` with :func:`qhm_iteration` as the
    per-frame core routine; see HM_run for the parameter semantics.
    """
    return HM_run(qhm_iteration, signal, pitch, window, samp_jump, N_iter,
                  phase_tech)
"""
Main aQHM function.
"""
def aqhm(signal, previous_HM, pitch, window, samp_jump=None, N_iter=1,
         N_runs=float('Inf'), phase_tech='phase', eaQHM_flag=False):
    """Run aQHM passes repeatedly, keeping the model with the best SRER.

    Each pass refines the previous harmonic model. Iteration stops at the
    first pass that fails to improve the SRER, or after N_runs passes.
    """
    run = 1
    while True:
        candidate = HM_run(aqhm_iteration, signal, pitch, window, samp_jump,
                           N_iter, phase_tech, [previous_HM, eaQHM_flag, 0])
        # The first pass is always accepted as the baseline.
        improved = run == 1 or candidate.SRER > previous_HM.SRER
        if improved:
            previous_HM = candidate
        run += 1
        if not improved or run > N_runs:
            return previous_HM
"""
Main eaQHM function (which in fact varies very few from the aQHM).
"""
def eaqhm(signal, previous_HM, pitch, window, samp_jump=None, N_iter=1,
          N_runs=float('Inf'), phase_tech='phase'):
    """Extract the harmonic model with eaQHM.

    Identical to :func:`aqhm` except that ``eaQHM_flag=True`` makes the core
    iteration normalize the exponential basis by the component magnitudes.
    """
    return aqhm(signal, previous_HM, pitch, window, samp_jump, N_iter, N_runs,
                phase_tech, eaQHM_flag=True)
"""
Parser for the three algorithms.
"""
def HM_run(func, signal, pitch, window, samp_jump=None, N_iter=1,
           phase_tech='phase', func_options=None):
    """Drive one full QHM/aQHM/eaQHM extraction over the voiced frames.

    :param func: per-frame core routine (qhm_iteration or aqhm_iteration).
    :param signal: input signal object (uses .data, .size, .fs, .n_harm).
    :param pitch: pitch track object (uses .values and .edges).
    :param window: SampleWindow instance.
    :param samp_jump: frame hop in seconds; None extracts sample-by-sample.
    :param N_iter: iterations of the core routine per frame.
    :param phase_tech: 'phase' or 'freq' basis for the phase extrapolation.
    :param func_options: extra options forwarded to the core routine.
    :return: a ModulatedSign with the extracted parameters, synthesized
        signal, SRER and extrapolated phase edges.
    """
    # Creates the output signal object and the dummy frequency vector.
    HM = ModulatedSign(signal.n_harm, signal.size, signal.fs, phase_tech)
    freq = np.zeros(signal.n_harm)
    # Selects whether the extration will be performed with temporal jumps or
    # not.
    if samp_jump is None:
        voiced_frames = np.nonzero(pitch.values)[0]
    else:
        jump = int(np.fix(max(samp_jump*signal.fs, 1.0)))
        voiced_frames = np.array([], dtype=int)
        # One frame every `jump` samples inside each voiced segment, always
        # including the segment's final frame.
        for beg, end in zip(pitch.edges[::2], pitch.edges[1::2]):
            voiced_frames = np.append(voiced_frames, np.arange(
                beg+1, end-1, jump))
            voiced_frames = np.append(voiced_frames, end)
    # Run the algorithm in the selected voiced frames.
    for frame in voiced_frames:
        # Uses the pitch value and the harmonic definition f_k = k*f0 to create
        # a frequency reference vector, which is employed to keep each component
        # within a frquency band and thus, avoiding least-squares instability.
        f0_ref = pitch.values[frame]*np.arange(1, signal.n_harm+1)/signal.fs
        # Set some algorithm options.
        if func is qhm_iteration:
            # At the start of each voiced segment, reset the frequency state
            # to the harmonic grid.
            if frame-1 in pitch.edges[::2]:
                freq[:] = f0_ref
            func_options = freq
        elif func is aqhm_iteration:
            func_options[2] = frame
        # Core algorithm function.
        coef, freq, HM.error[frame] = func(
            signal.data[frame-window.N:frame+window.N+1],
            f0_ref, window, signal.fs, 20.0, func_options,
            N_iter)
        # Updates frame parameter values in the 3-dimension storage array H.
        HM.update_values(coef[:signal.n_harm], freq, frame)
    # If the extraction was performed with temporal jumps, interpolate the
    # results.
    if samp_jump is not None:
        HM.interpolate_samp(voiced_frames, pitch.values)
    HM.synthesize()
    HM.srer(signal.data, pitch.values)
    HM.phase_edges(pitch.edges, window)
    return HM
"""
Core QHM function.
"""
def qhm_iteration(data, f0_ref, window, fs, max_step, freq, N_iter=1):
    """Estimate complex amplitudes, slopes and corrected frequencies for one
    analysis frame with the QHM least-squares iteration.

    :param data: frame samples (length window.length).
    :param f0_ref: harmonic frequency grid (normalized by fs) that keeps each
        component inside its spectral band.
    :param window: SampleWindow instance.
    :param fs: sampling frequency in Hz.
    :param max_step: maximum frequency correction per iteration, in Hz.
    :param freq: current normalized frequency estimates (passed via
        func_options from HM_run).
    :param N_iter: number of QHM refinement iterations.
    :return: (coefficients [a_k then b_k], updated frequencies, frame MSE).
    """
    # Initialize and allocate variables.
    K = len(freq)
    coef = np.zeros((2*K))
    E = np.ones((window.length, 2*K), dtype=complex)
    E = exp_matrix(E, freq, window, K)
    E_windowed = np.ones((window.length, 2*K), dtype=complex)
    windowed_data = (window.data*data).reshape(window.length, 1)
    # Run the QHM algorithm N_iter times.
    for k in range(N_iter):
        # Calculate the a and b coeficients via least-squares.
        coef = least_squares(E, E_windowed, windowed_data, window, K)
        # Set a magnitude reference, which is used to detect and supress
        # erroneous magnitude spikes.
        mag_ref = np.abs(coef[0])
        # Updates the frequency values.
        freq, ro = freq_correction(coef[:K], coef[K:], freq, f0_ref, mag_ref, K,
                                   max_step, fs)
        # Updates the complex exponentials matrix.
        E = exp_matrix(E, freq, window, K)
    # Compute the final coefficients values.
    coef = least_squares(E, E_windowed, windowed_data, window, K)
    # This part is a workaround not present in the original references [1-3].
    # It was created to detect and supress erroneous magnitude spikes, which
    # degradate the final synthsized signal and consequently, its SRER.
    # Alternatively, the magnitude signals could be smoothed after extraction.
    # For more details, check the README file.
    cond = (np.abs(coef[:K]) < 5.5*np.abs(coef[0]))
    if not cond.all():
        # Reset the offending components to the harmonic grid and re-solve.
        freq[~cond] = f0_ref[~cond]
        # Updates the complex exponentials matrix with the modified frequencies.
        E = exp_matrix(E, freq, window, K)
        # Recalculate the final coefficients.
        coef = least_squares(E, E_windowed, windowed_data, window, K)
    # Calculate the mean squared error between the original frame and the
    # synthesized one.
    err = error_calc(windowed_data, E, coef, window)
    return coef, freq, err
"""
Core aQHM and eaQHM function.
"""
def aqhm_iteration(data, f0_ref, window, fs, max_step, func_options,
                   N_iter=1):
    """Estimate complex amplitudes, slopes and corrected frequencies for one
    analysis frame with the aQHM (or eaQHM) least-squares iteration.

    Unlike QHM, the exponential basis is built from the previously extracted
    (extrapolated) phase of ``previous_HM`` — and, for eaQHM, additionally
    normalized by the previously extracted magnitude.

    :param data: frame samples (length window.length).
    :param f0_ref: harmonic frequency grid (normalized by fs).
    :param window: SampleWindow instance.
    :param fs: sampling frequency in Hz.
    :param max_step: maximum frequency correction per iteration, in Hz.
    :param func_options: [previous_HM, eaQHM_flag, frame index].
    :param N_iter: number of refinement iterations.
    :return: (coefficients [a_k then b_k], updated frequencies, frame MSE).
    """
    # Initialize and allocate variables.
    previous_HM = func_options[0]
    eaQHM_flag = func_options[1]
    frame = func_options[2]
    freq = previous_HM.H[:, 2, frame]
    windowed_data = (window.data*data).reshape(window.length, 1)
    # Set a magnitude reference, which is used to detect and supress
    # erroneous magnitude spikes.
    mag_ref = np.abs(previous_HM.H[0, 0, frame])
    # Ajust the phase frame (center it at the current frame).
    extrap_phase_center = previous_HM.extrap_phase[:, frame].reshape(
        previous_HM.n_harm, 1)
    phase_frame = previous_HM.extrap_phase[:, frame-window.N:frame+window.N+1] - \
        extrap_phase_center
    # Initialize the coefficients.
    coef = np.vstack((previous_HM.H[:, 0, frame].reshape(previous_HM.n_harm, 1) *
                      np.exp(1j*extrap_phase_center), np.zeros((previous_HM.n_harm, 1))))[:, 0]
    # Initialize the matrices.
    E = np.ones((window.length, 2*previous_HM.n_harm), dtype=complex)
    E_ro = np.ones((window.length, 2*previous_HM.n_harm), dtype=complex)
    E_windowed = np.ones((window.length, 2*previous_HM.n_harm), dtype=complex)
    E[:, :previous_HM.n_harm] = np.exp(1j*phase_frame.T)
    # If the eaQHM algorithm was selected, ajust the exponential matrix with
    # the normalized magnitude.
    if eaQHM_flag:
        mag_center = previous_HM.H[:, 0, frame].reshape(previous_HM.n_harm, 1)
        mag_frame = previous_HM.H[:, 0, frame-window.N:frame+window.N+1] / \
            mag_center
        E[:, :previous_HM.n_harm] = mag_frame.T*E[:, :previous_HM.n_harm]
    E[:, previous_HM.n_harm:] = E[:, :previous_HM.n_harm] * \
        window.len_vec.reshape(window.length, 1)
    # Run the aQHM/eaQHM algorithm N_iter times.
    for k in range(N_iter):
        # Calculate the a and b coeficients via least-squares.
        coef = least_squares(E, E_windowed, windowed_data, window,
                             previous_HM.n_harm)
        # Updates the frequency values.
        freq, ro = freq_correction(coef[:previous_HM.n_harm],
                                   coef[previous_HM.n_harm:], freq, f0_ref,
                                   mag_ref, previous_HM.n_harm, max_step, fs)
        # Updates the complex exponentials matrix (multiplicatively, with the
        # exponential of the frequency mismatch ro).
        E = E*exp_matrix(E_ro, ro/(2*np.pi), window, previous_HM.n_harm)
    # Compute the final coefficients values.
    coef = least_squares(E, E_windowed, windowed_data, window,
                         previous_HM.n_harm)
    # This part is a workaround not present in the original references [1-3].
    # It was created to detect and supress erroneous magnitude spikes, which
    # degradate the final synthsized signal and consequently, its SRER.
    # Alternatively, the magnitude signals could be smoothed after extraction.
    # For more details, check the README file.
    cond = (np.abs(coef[:previous_HM.n_harm]) < 5.5*mag_ref)
    if not cond.all():
        freq[~cond] = f0_ref[~cond]
        # Since that the troubling aQHM/eaQHM exponentials are degradating the
        # results, they are replaced by the QHM version, which is more stable.
        E[:, ~np.append(cond, cond)] = exp_matrix(E_ro, freq, window,
                                                  previous_HM.n_harm)[:, ~np.append(cond, cond)]
        # Recalculate the final coefficients.
        coef = least_squares(E, E_windowed, windowed_data, window,
                             previous_HM.n_harm)
    # Calculate the mean squared error between the original frame and the
    # synthsized one.
    err = error_calc(windowed_data, E, coef, window)
    return coef, freq, err
"""
--------------------------------------------
Auxiliary Functions.
--------------------------------------------
"""
"""
Calculate the a and b coeficients via least-squares method.
"""
def least_squares(E, E_windowed, windowed_data, window, K):
    """Solve the windowed least-squares system for the complex coefficients.

    ``E_windowed`` is filled in place with the window-weighted basis, then the
    normal equations R x = B are solved (R = Ew^H Ew, B = Ew^H d).

    :param E: (length, 2K) complex exponential basis matrix.
    :param E_windowed: (length, 2K) preallocated buffer, overwritten in place.
    :param windowed_data: (length, 1) window-weighted frame samples.
    :param window: SampleWindow instance (uses .data and .length).
    :param K: number of components (kept for interface compatibility; the
        system size comes from the matrix shapes).
    :return: flat complex coefficient vector of length 2K.
    """
    # The previous zero-initializations of R and B were dead code: both names
    # were immediately rebound to the computed matrices below.
    E_windowed[:, :] = E*window.data.reshape(window.length, 1)
    R = E_windowed.conj().T.dot(E_windowed)
    B = E_windowed.conj().T.dot(windowed_data)
    coef = np.linalg.solve(R, B)[:, 0]
    return coef
"""
Calculates the frequency mismatch and updates the frequency values.
"""
def freq_correction(a, b, freq, f0_ref, mag_ref, n_harm, max_step, fs):
    """Compute the QHM frequency mismatch and update the frequency estimates.

    The mismatch ro is derived from the complex amplitudes a and slopes b,
    saturated to ``max_step`` Hz, and added to the frequencies. Components
    that leave their harmonic band, leave (0, 0.5), or show a magnitude spike
    are reset to the reference grid ``f0_ref``.

    :return: (updated frequencies, actually applied correction in rad).
    """
    old_freq = np.array(freq, dtype=float)
    ro = (a.real*b.imag - a.imag*b.real)/np.abs(a)**2
    # Saturate large mismatches (> max_step Hz) to keep the iteration stable.
    limit = max_step*2*np.pi/fs
    saturated = np.abs(ro) > limit
    ro[saturated] = np.sign(ro[saturated])*limit
    freq = freq + ro/(2*np.pi)
    # Reset components that escaped their spectral band or spiked in magnitude.
    out_of_band = ((np.round(freq/f0_ref[0]) != np.arange(n_harm) + 1) |
                   (freq > 0.5) | (freq < 0) | (np.abs(a) > 5.5*mag_ref))
    freq[out_of_band] = f0_ref[out_of_band]
    return freq, (freq - old_freq)*(2*np.pi)
"""
Calculate the mean squared error between the original frame and the
synthsized one.
"""
def error_calc(windowed_data, E, coef, window):
    """Mean squared error between the windowed frame and its synthesis.

    ``windowed_data`` is a (length, 1) column vector while the synthesized
    signal 2*Re{E.coef}*w(n) is a flat (length,) vector; the column is
    flattened before subtracting so the residual is elementwise. (The
    previous column-minus-row subtraction broadcast the difference into a
    (length, length) matrix and summed all of it, inflating the error.)
    """
    h = E.dot(coef)
    residual = windowed_data[:, 0] - 2*h.real*window.data
    return np.sum(residual**2)
"""
Mounts the complex exponentials matrix.
"""
def exp_matrix(E, freq, window, K):
    """Fill the complex-exponential basis matrix in place and return it.

    Column k of the first K columns holds exp(2j*pi*freq[k]*n) for
    n = -N..N (the center row n=0 keeps its initial ones; the negative half
    is the conjugate mirror of the positive half). The last K columns are
    the same exponentials multiplied by the sample index n.
    """
    half = window.N
    # Positive-time half: cumulative products of the one-sample rotation.
    E[half+1:, :K] = np.exp(1j*np.pi*2*freq)
    E[half+1:, :K] = np.cumprod(E[half+1:, :K], axis=0)
    # Negative-time half by conjugate symmetry.
    E[:half, :K] = np.conj(E[half+1:, :K][::-1, :])
    # Slope columns: exponentials scaled by the sample index.
    E[:, K:] = E[:, :K]*window.len_vec.reshape(window.length, 1)
    return E
"""
Some side functions found in reference [2].
"""
def g0(x, N):
    """Dirichlet-style kernel sin((2N+1)x/2)/sin(x/2) from reference [2],
    with the x -> 0 limit (2N+1) handled explicitly."""
    if x == 0:
        return 2*N + 1
    return np.sin((2*N + 1)*x/2)/np.sin(x/2)
def g1(x, N):
    """Derivative-related side kernel from reference [2], with the x -> 0
    limit (0) handled explicitly."""
    if x == 0:
        return 0
    half_sin = np.sin(x/2)
    return 1j*(np.sin(N*x)/(2*half_sin**2) -
               N*np.cos((2*N + 1)*x/2)/half_sin)
def R_eq(delta_f, func, window):
    """Hamming-window spectral combination from reference [2].

    Evaluates *func* at the frequency offsets 0, +-1/(2N) and +-1/N (scaled
    by 2*pi) and combines the values with the squared-Hamming coefficients
    a0, a1 and a2 stored on *window*.
    """
    N = window.N
    return (window.a0*func(2*np.pi*delta_f, N)
            + window.a1*func(2*np.pi*(delta_f + 1./(2*N)), N)
            + window.a1*func(2*np.pi*(delta_f - 1./(2*N)), N)
            + window.a2*func(2*np.pi*(delta_f + 1./N), N)
            + window.a2*func(2*np.pi*(delta_f - 1./N), N))
/Flask-SignalBus-0.5.20.tar.gz/Flask-SignalBus-0.5.20/README.rst | Flask-SignalBus
===============
**Flask-SignalBus** adds to Flask-SQLAlchemy the capability to
*atomically* send messages (signals) over a message bus.
**Important note:** Flask-SignalBus does work with Flask-SQLAlchemy
2.5, and does not work with Flask-SQLAlchemy 3.0 or later. And for
this reason, SQLAlchemy 2.0 or later is not supported.
The Problem
```````````
In microservices, the temptation to do distributed transactions pops
up all the time.
*Distributed transaction*:
any situation where a single event results in the mutation of two
separate sources of data which cannot be committed atomically
One practical and popular solution is to pick one of the services to
be the primary handler for some particular event. This service will
handle the original event with a single commit, and then take
responsibility for asynchronously communicating the secondary effects
to other services via a message bus of some sort (RabbitMQ, Kafka,
etc.).
Thus, the processing of each "distributed" event involves three steps:
1. As part of the original event transaction, one or more messages
are recorded in the SQL database of the primary handler service
(as rows in tables).
2. The messages are sent over the message bus.
3. Messages' corresponding table rows are deleted.
*Flask-SignalBus* automates this process and makes it less error-prone.
It automatically sends the recorded messages after each transaction
commit (steps 2 and 3). Also, when needed, the sending of the recorded
messages can be triggered explicitly with a method call, or through
the Flask command line interface.
You can read the docs `here`_.
.. _here: https://flask-signalbus.readthedocs.io/en/latest/
| PypiClean |
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/model.py | from AccessControl.interfaces import IUser
from AccessControl.PermissionRole import _what_not_even_god_should_do
from AccessControl.SecurityInfo import ClassSecurityInfo
from Acquisition import aq_chain
from Acquisition import aq_inner
from Acquisition import aq_parent
from collections import defaultdict
from euphorie.client.client import IClient
from euphorie.client.config import LOCKING_ACTIONS
from euphorie.client.config import LOCKING_SET_ACTIONS
from euphorie.client.enum import Enum
from OFS.interfaces import IApplication
from plone import api
from plone.app.event.base import localized_now
from plone.memoize import ram
from plone.memoize.instance import memoize
from Products.CMFPlone.utils import safe_nativestring
from Products.CMFPlone.utils import safe_unicode
from Products.Five import BrowserView
from sqlalchemy import func
from sqlalchemy import orm
from sqlalchemy import schema
from sqlalchemy import sql
from sqlalchemy import types
from sqlalchemy.event import listen
from sqlalchemy.ext.declarative.extensions import instrument_declarative
from sqlalchemy.orm.decl_base import _declarative_constructor
from sqlalchemy.sql import functions
from z3c.saconfig import Session
from zope.component.hooks import getSite
from zope.deprecation import deprecate
from zope.interface import implementer
from zope.interface import Interface
from zope.sqlalchemy import datamanager
import Acquisition
import bcrypt
import datetime
import logging
import OFS.Traversable
import pytz
import random
import re
BCRYPTED_PATTERN = re.compile(r"^\$2[aby]?\$\d{1,2}\$[.\/A-Za-z0-9]{53}$")
metadata = schema.MetaData()
log = logging.getLogger(__name__)
def _forever_cache_key(func, self, *args):
    """Cache this function call forever.

    The key includes ``self.password``, so cached entries are implicitly
    invalidated whenever the stored password hash changes.
    """
    return (func.__name__, self.password, args)
def GenerateSecret(length=32):
    """Return *length* random characters with code points in 0-255.

    NOTE(review): this draws from the non-cryptographic ``random`` module;
    for security-sensitive secrets ``secrets.token_bytes`` should be
    preferred. Kept as-is to preserve existing (seedable) behavior.
    """
    # join() avoids the quadratic string concatenation of the previous loop
    # while drawing the exact same byte sequence for a given random state.
    return "".join(chr(random.getrandbits(8)) for _ in range(length))
class BaseObject(OFS.Traversable.Traversable, Acquisition.Implicit):
    """Zope 2-style base class for our models.

    This base class allows SQL based objects to act like normal Zope 2
    objects. In particular it allows acquisition to find skin objects
    and keeps absolute_url() and getPhysicalPath() working.
    """

    # Build instances via SQLAlchemy's declarative constructor, so keyword
    # arguments map directly onto mapped columns.
    __init__ = _declarative_constructor
    __allow_access_to_unprotected_subobjects__ = True
    __new__ = object.__new__

    security = ClassSecurityInfo()

    def getId(self):
        # Zope addresses content by string ids; our SQL primary key is an int.
        return str(self.id)

    @security.public
    def getPhysicalPath(self):
        # Get the physical path of the object.
        #
        # We need to override this because the new Zope implementations
        # uses self.id instead of self.getId, which make a big difference in our case
        id = self.getId()
        path = (id,)
        p = aq_parent(aq_inner(self))
        if p is None:
            return path
        func = self.getPhysicalPath.__func__
        # Walk up the acquisition chain. Parents sharing this very
        # implementation are traversed inline; the first parent with a
        # different getPhysicalPath is delegated to instead.
        while p is not None:
            if func is p.getPhysicalPath.__func__:
                pid = p.getId()
                path = (pid,) + path
                p = aq_parent(aq_inner(p))
            else:
                if IApplication.providedBy(p):
                    # Reached the Zope application root: prepend the empty
                    # string so the joined path is absolute ("/...").
                    path = ("",) + path
                else:
                    path = p.getPhysicalPath() + path
                break
        return path
class SurveyTreeItem(BaseObject):
"""A tree of questions.
The data is stored in the form of a materialized tree. The path is
built using a list of item numbers. Each item number has three
digits and uses 0-prefixing to make sure we can use simple string
sorting to produce a sorted tree.
"""
__tablename__ = "tree"
__table_args__ = (
schema.UniqueConstraint("session_id", "path"),
schema.UniqueConstraint("session_id", "zodb_path", "profile_index"),
{},
)
id = schema.Column(types.Integer(), primary_key=True, autoincrement=True)
session_id = schema.Column(
types.Integer(),
schema.ForeignKey("session.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
index=True,
)
parent_id = schema.Column(
types.Integer(),
schema.ForeignKey("tree.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=True,
index=True,
)
type = schema.Column(
Enum(["risk", "module"]),
nullable=False,
index=True,
)
path = schema.Column(
types.String(40),
nullable=False,
index=True,
)
has_description = schema.Column(
types.Boolean(),
default=False,
index=True,
)
zodb_path = schema.Column(
types.String(512),
nullable=False,
)
profile_index = schema.Column(
types.Integer(),
default=0,
nullable=False,
)
depth = schema.Column(
types.Integer(),
default=0,
nullable=False,
index=True,
)
title = schema.Column(types.Unicode(512))
postponed = schema.Column(types.Boolean())
skip_children = schema.Column(
types.Boolean(),
default=False,
nullable=False,
)
__mapper_args__ = dict(polymorphic_on=type)
session = orm.relationship(
"SurveySession",
cascade="all",
)
# parent = orm.relationship("SurveyTreeItem", uselist=False)
@property
def parent(self):
# XXX Evil! Figure out why the parent relation does not work
return self.parent_id and Session.query(SurveyTreeItem).get(self.parent_id)
def getId(self):
return self.path[-3:].lstrip("0")
@property
def short_path(self):
def slice(path):
while path:
yield path[:3].lstrip("0")
path = path[3:]
return slice(self.path)
@property
def number(self):
return ".".join(self.short_path)
def children(self, filter=None):
query = (
Session.query(SurveyTreeItem)
.filter(SurveyTreeItem.session_id == self.session_id)
.filter(SurveyTreeItem.depth == self.depth + 1)
)
if self.path:
query = query.filter(SurveyTreeItem.path.like(self.path + "%"))
if filter is not None:
query = query.filter(filter)
return query.order_by(SurveyTreeItem.path)
def siblings(self, klass=None, filter=None):
if not self.path:
return []
if klass is None:
klass = SurveyTreeItem
query = (
Session.query(klass)
.filter(klass.session_id == self.session_id)
.filter(klass.parent_id == self.parent_id)
)
if filter is not None:
query = query.filter(sql.or_(klass.id == self.id, filter))
return query.order_by(klass.path)
    def addChild(self, item):
        """Append ``item`` as the last child of this node.

        Computes the next free 3-digit sibling index from the current
        last child, fills in the tree bookkeeping columns on ``item``
        and adds it to the SQL session.

        :return: the added item.
        """
        sqlsession = Session()
        query = (
            sqlsession.query(SurveyTreeItem.path)
            .filter(SurveyTreeItem.session_id == self.session_id)
            .filter(SurveyTreeItem.depth == self.depth + 1)
        )
        if self.path:
            query = query.filter(SurveyTreeItem.path.like(self.path + "%"))
        last = query.order_by(SurveyTreeItem.path.desc()).first()
        if not last:
            index = 1
        else:
            # The last 3 characters of a path are the sibling index.
            index = int(last[0][-3:]) + 1
        item.session = self.session
        item.depth = self.depth + 1
        item.path = (self.path or "") + "%03d" % index
        item.parent_id = self.id
        # -1 appears to act as a "no profile" sentinel that must not be
        # propagated to children -- TODO confirm against callers.
        if self.profile_index != -1:
            item.profile_index = self.profile_index
        sqlsession.add(item)
        self.session.touch()
        return item
def removeChildren(self, excluded=[]):
if self.id not in excluded:
excluded.append(self.id)
session = Session()
if self.path:
filter = sql.and_(
SurveyTreeItem.session_id == self.session_id,
SurveyTreeItem.path.like(self.path + "%"),
sql.not_(SurveyTreeItem.id.in_(excluded)),
)
else:
filter = sql.and_(
SurveyTreeItem.session_id == self.session_id,
sql.not_(SurveyTreeItem.id.in_(excluded)),
)
removed = session.query(SurveyTreeItem).filter(filter).all()
session.execute(SurveyTreeItem.__table__.delete().where(filter))
self.session.touch()
datamanager.mark_changed(session)
return removed
class Group(BaseObject):
    """An organisational group of accounts.

    Groups form a hierarchy through ``parent_id`` and can own survey
    sessions.  The ``brand`` column allows this class to be subclassed
    in other projects (see ``__mapper_args__``).
    """

    __tablename__ = "group"
    group_id = schema.Column(
        types.Unicode(32),
        primary_key=True,
    )
    parent_id = schema.Column(
        types.Unicode(32),
        schema.ForeignKey("group.group_id"),
    )
    short_name = schema.Column(
        types.Unicode(32),
    )
    long_name = schema.Column(
        types.Unicode(256),
    )
    responsible_id = schema.Column(
        types.Unicode(32),
    )
    responsible_fullname = schema.Column(
        types.Unicode(32),
    )
    # When set, the group is considered obsolete (see ``fullname``).
    deactivated = schema.Column(
        types.DateTime,
        nullable=True,
        default=None,
    )
    parent = orm.relationship(
        "Group",
        back_populates="children",
        remote_side=[group_id],
    )
    children = orm.relationship(
        "Group",
        back_populates="parent",
        remote_side=[parent_id],
    )
    accounts = orm.relationship(
        "Account",
        back_populates="group",
    )
    brand = schema.Column(types.String(64))
    sessions = orm.relationship(
        "SurveySession",
        back_populates="group",
        order_by="SurveySession.modified",
        cascade="all, delete-orphan",
    )
    # Allow this class to be subclassed in other projects
    __mapper_args__ = {
        "polymorphic_identity": "euphorie",
        "polymorphic_on": brand,
        "with_polymorphic": "*",
    }

    @property
    def fullname(self):
        """This is the name that will be display in the selectors and in the
        tree widget."""
        # Bug fix: the "[obs.]" (obsolete) marker must flag *deactivated*
        # groups; the original condition was inverted and tagged every
        # active group instead.
        title = "{obs}{name}".format(
            obs="[obs.] " if self.deactivated else "",
            name=self.short_name or self.group_id,
        )
        if self.responsible_fullname:
            title += f", {self.responsible_fullname}"
        return title

    @property
    def descendantids(self):
        """Return the ids of all groups below this one, flattened."""
        structure = self._group_structure
        ids = []

        def get_ids(groupid):
            # ``structure`` is a defaultdict(set): unknown ids simply
            # yield an empty set and stop the recursion.
            new_ids = structure[groupid]
            if not new_ids:
                return
            ids.extend(new_ids)
            tuple(map(get_ids, new_ids))

        get_ids(self.group_id)
        return ids

    @property
    def descendants(self):
        """Return all the groups below this one, flattened."""
        return list(
            Session.query(self.__class__).filter(
                self.__class__.group_id.in_(self.descendantids)
            )
        )

    @property
    def parents(self):
        """Return the chain of ancestor groups, nearest first."""
        group = self
        parents = []
        while True:
            parent = group.parent
            if not parent:
                return parents
            parents.append(parent)
            group = parent

    @property
    @memoize
    def _group_structure(self):
        """Return a dict like structure with the group ids as keys and the
        children group ids as values."""
        tree = defaultdict(set)
        for groupid, parentid in Session.query(Group.group_id, Group.parent_id).filter(
            Group.parent_id != None  # noqa: E711
        ):
            tree[parentid].add(groupid)
        return tree

    @property
    def acquired_sessions(self):
        """All the session relative to this group and its children."""
        group_ids = [self.group_id]
        group_ids.extend(g.group_id for g in self.descendants)
        return (
            Session.query(SurveySession)
            .filter(SurveySession.group_id.in_(group_ids))
            .all()
        )
class Consultancy(BaseObject):
    """Information about consultancy on a session."""
    __tablename__ = "consultancy"
    # One row per session: the session id is the primary key.
    session_id = schema.Column(
        schema.ForeignKey("session.id", onupdate="CASCADE", ondelete="CASCADE"),
        primary_key=True,
    )
    # Consultant account; set to NULL if that account is deleted.
    account_id = schema.Column(
        schema.ForeignKey("account.id", onupdate="CASCADE", ondelete="SET NULL"),
        nullable=True,
    )
    session = orm.relationship(
        "SurveySession",
        uselist=False,
        back_populates="consultancy",
    )
    account = orm.relationship(
        "Account",
        uselist=False,
        back_populates="consultancy",
    )
    # Workflow status; defaults to "pending".
    status = schema.Column(
        types.Unicode(255),
        default="pending",
    )
@implementer(IUser)
class Account(BaseObject):
    """A user account.
    Users have to register with euphorie before they can start a survey
    session. A single account can have multiple survey sessions.
    """
    __tablename__ = "account"
    id = schema.Column(
        types.Integer(),
        primary_key=True,
        autoincrement=True,
    )
    # The login name doubles as the email address (see ``email`` below).
    loginname = schema.Column(
        types.String(255),
        nullable=False,
        index=True,
        unique=True,
    )
    # bcrypt hash of the password; hashed by the before-insert/update
    # listeners via ``hash_password``.
    password = schema.Column(types.Unicode(64))
    tc_approved = schema.Column(types.Integer())
    account_type = schema.Column(
        Enum(["guest", "converted", "full"]),
        default="full",
        nullable=True,
    )
    group_id = schema.Column(
        types.Unicode(32),
        schema.ForeignKey("group.group_id"),
    )
    created = schema.Column(
        types.DateTime,
        nullable=True,
        default=functions.now(),
    )
    last_login = schema.Column(
        types.DateTime,
        nullable=True,
        default=None,
    )
    first_name = schema.Column(
        types.Unicode(),
        nullable=True,
        default=None,
    )
    last_name = schema.Column(
        types.Unicode(),
        nullable=True,
        default=None,
    )
    consultancy = orm.relationship(
        "Consultancy",
        uselist=False,
        back_populates="account",
    )
    group = orm.relationship(
        Group,
        back_populates="accounts",
    )
    @property
    def groups(self):
        """Return this account's group plus all its descendant groups."""
        group = self.group
        if not group:
            return []
        groups = [group]
        groups.extend(group.descendants)
        return groups
    @property
    def acquired_sessions(self):
        """The session the account acquires because he belongs to a group."""
        group = self.group
        if not group:
            return []
        return list(group.acquired_sessions)
    @property
    def group_sessions(self):
        """The session the account acquires because he belongs to a group."""
        group = self.group
        if not group:
            return []
        return list(group.sessions)
    @property
    def email(self):
        """Email addresses are used for login, return the login."""
        return self.loginname
    @property
    def login(self):
        """This synchs naming with :obj:`euphorie.content.user.IUser` and is
        needed by the authentication tools."""
        return self.loginname
    @property
    def title(self):
        """Return the joined first_name and last_name of the account if
        present.
        If they are not fallback to the loginname
        """
        return (
            " ".join((self.first_name or "", self.last_name or "")).strip()
            or self.loginname
        )
    def getUserName(self):
        """Return the login name."""
        return self.loginname
    def getGroups(self):
        return ["AuthenticatedUsers"]
    def getRoles(self):
        """Return all global roles for this user."""
        return ("EuphorieUser",)
    def getRolesInContext(self, object):
        """Return the roles of the user in the current context (same as
        :obj:`getRoles`).
        """
        return self.getRoles()
    def getDomains(self):
        return []
    def addPropertysheet(self, propfinder_id, data):
        # No-op; presumably part of the Zope user API -- confirm.
        pass
    def _addGroups(self, group_ids):
        pass
    def _addRoles(self, role_ids):
        pass
    def has_permission(self, perm, context):
        """Check if the user has a permission in a context."""
        return perm == "Euphorie: View a Survey"
    def allowed(self, context, object_roles=None):
        """Check if this account has any of the requested roles in the context
        of `object`."""
        if object_roles is _what_not_even_god_should_do:
            return False
        if object_roles is None:
            return True
        # Roles are only granted inside the client part of the site.
        for obj in aq_chain(aq_inner(context)):
            if IClient.providedBy(obj):
                allowed_roles = {"Anonymous", "Authenticated", "EuphorieUser", "Reader"}
                return bool(allowed_roles & set(object_roles))
        return False
    @ram.cache(_forever_cache_key)
    def verify_password(self, password):
        """Verify the given password against the one stored in the account
        table."""
        # NOTE(review): the result is RAM-cached "forever" per call
        # signature -- confirm this is intended for a credential check.
        if not password:
            return False
        if not isinstance(password, str):
            return False
        # Plain-text equality: presumably supports legacy/unhashed stored
        # passwords (e.g. fixtures) -- confirm before removing.
        if password == self.password:
            return True
        password = safe_nativestring(password)
        # NOTE(review): modern ``bcrypt.checkpw`` expects bytes; native
        # strings are passed here -- confirm against the bcrypt in use.
        return bcrypt.checkpw(password, self.password)
    def hash_password(self):
        """Hash the account password using bcrypt."""
        try:
            password = self.password
        except AttributeError:
            return
        if not password:
            return
        password = safe_nativestring(password)
        if BCRYPTED_PATTERN.match(password):
            # The password is already encrypted, do not encrypt it again
            # XXX this is broken with passwords that are actually an hash
            return
        self.password = safe_unicode(
            bcrypt.hashpw(
                password,
                bcrypt.gensalt(),
            )
        )
def account_before_insert_subscriber(mapper, connection, account):
    """SQLAlchemy mapper-event handler: hash the password before the
    account row is written (``hash_password`` is a no-op for values that
    are already bcrypt hashes)."""
    account.hash_password()
# The same handler also covers updates, so edited passwords get hashed.
account_before_update_subscriber = account_before_insert_subscriber
listen(Account, "before_insert", account_before_insert_subscriber)
listen(Account, "before_update", account_before_update_subscriber)
class AccountChangeRequest(BaseObject):
    """A pending request to change an account detail.

    At most one request per account (``account_id`` is unique).
    """
    # Token identifying the request.
    id = schema.Column(types.String(16), primary_key=True, nullable=False)
    __tablename__ = "account_change"
    account_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Account.id, onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        unique=True,
    )
    account = orm.relationship(
        Account,
        back_populates="change_request",
    )
    # The requested new value.
    value = schema.Column(
        types.String(255),
        nullable=False,
    )
    # Moment after which the request is no longer valid.
    expires = schema.Column(
        types.DateTime(),
        nullable=False,
    )
Account.change_request = orm.relationship(
    AccountChangeRequest,
    back_populates="account",
    cascade="all, delete-orphan",
    uselist=False,
)
class ISurveySession(Interface):
    """Marker interface for a SurveySession object."""
@implementer(ISurveySession)
class SurveySession(BaseObject):
    """Information about a user's session."""
    __tablename__ = "session"
    id = schema.Column(types.Integer(), primary_key=True, autoincrement=True)
    # Discriminator that allows other projects to subclass this model
    # (see __mapper_args__ below).
    brand = schema.Column(types.String(64))
    account_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Account.id, onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    last_modifier_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Account.id, onupdate="CASCADE", ondelete="CASCADE"),
        nullable=True,
        index=False,
    )
    last_publisher_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Account.id, onupdate="CASCADE", ondelete="CASCADE"),
        nullable=True,
        index=False,
    )
    group_id = schema.Column(
        types.Unicode(32),
        schema.ForeignKey("group.group_id"),
    )
    title = schema.Column(types.Unicode(512))
    created = schema.Column(
        types.DateTime,
        nullable=False,
        default=functions.now(),
    )
    modified = schema.Column(
        types.DateTime,
        nullable=False,
        default=functions.now(),
    )
    refreshed = schema.Column(
        types.DateTime,
        nullable=False,
        default=functions.now(),
    )
    published = schema.Column(
        types.DateTime,
        nullable=True,
        default=None,
    )
    archived = schema.Column(
        types.DateTime(timezone=True),
        nullable=True,
        default=None,
    )
    # Path of the survey (tool) in the ZODB this session was taken from.
    zodb_path = schema.Column(types.String(512), nullable=False)
    report_comment = schema.Column(types.UnicodeText())
    account = orm.relationship(
        Account,
        back_populates="sessions",
        foreign_keys=[account_id],
    )
    last_modifier = orm.relationship(
        Account,
        foreign_keys=[last_modifier_id],
    )
    last_publisher = orm.relationship(
        Account,
        foreign_keys=[last_publisher_id],
    )
    group = orm.relationship(
        Group,
        back_populates="sessions",
    )
    consultancy = orm.relationship(
        "Consultancy",
        uselist=False,
        back_populates="session",
    )
    migrated = schema.Column(
        types.DateTime,
        nullable=False,
        default=functions.now(),
    )
    # Allow this class to be subclassed in other projects
    __mapper_args__ = {
        "polymorphic_identity": "euphorie",
        "polymorphic_on": brand,
        "with_polymorphic": "*",
    }
    @property
    def is_archived(self):
        """True when an archival timestamp is set and lies in the past."""
        archived = self.archived
        if not archived:
            return False
        return archived <= localized_now()
    @property
    def last_locking_event(self):
        """Return the last event relative to locking"""
        query = (
            Session.query(SessionEvent)
            .filter(
                SessionEvent.action.in_(LOCKING_ACTIONS),
                SessionEvent.session_id == self.id,
            )
            .order_by(SessionEvent.time.desc())
        )
        return query.first()
    @property
    def last_validation_event(self):
        """Return the last event relative to validation"""
        query = (
            Session.query(SessionEvent)
            .filter(
                SessionEvent.action.in_(
                    (
                        "validation_requested",
                        "validated",
                        "invalidated",
                    )
                ),
                SessionEvent.session_id == self.id,
            )
            .order_by(SessionEvent.time.desc())
        )
        return query.first()
    @property
    def is_validated(self):
        """Check if the session is validated."""
        event = self.last_validation_event
        if not event:
            return False
        return event.action == "validated"
    @property
    def is_locked(self):
        """Check if the session is locked."""
        # A validated session counts as locked regardless of lock events.
        if self.is_validated:
            return True
        event = self.last_locking_event
        if not event:
            return False
        return event.action in LOCKING_SET_ACTIONS
    @property
    @deprecate(
        "Deprecated in version 15.0.0.dev0. "
        "You might want to use self.is_locked instead."
    )
    def review_state(self):
        """Check if it the published column.
        If it has return 'published' otherwise return 'private'
        """
        return "published" if self.is_locked else "private"
    def hasTree(self):
        """Return True when any tree items exist for this session."""
        return bool(
            Session.query(SurveyTreeItem).filter(SurveyTreeItem.session == self).count()
        )
    def reset(self):
        """Delete the whole session tree and reset the timestamps."""
        Session.query(SurveyTreeItem).filter(SurveyTreeItem.session == self).delete()
        self.created = self.modified = datetime.datetime.now()
    def touch(self):
        """Record the current account and time as the last modification."""
        self.last_modifier = get_current_account()
        self.modified = datetime.datetime.now()
    def refresh_survey(self, survey=None):
        """Mark the session with the current date to indicate that is has been
        refreshed with the latest version of the Survey (from Zope).
        If survey is passed, update all titles in the tree, based on the
        CMS version of the survey, i.e. update all titles of modules and
        risks. Those are used in the navigation. If a title change is
        the only change in the CMS, the survey session is not re-
        created. Therefore this method ensures that the titles are
        updated where necessary.
        """
        if survey:
            query = Session.query(SurveyTreeItem).filter(
                SurveyTreeItem.session_id == self.id
            )
            tree = query.all()
            for item in tree:
                # Custom risks have no CMS counterpart; leave them alone.
                if item.zodb_path.find("custom-risks") >= 0:
                    continue
                zodb_item = survey.restrictedTraverse(item.zodb_path.split("/"), None)
                if zodb_item and zodb_item.title != item.title:
                    item.title = zodb_item.title
        self.refreshed = datetime.datetime.now()
    def update_measure_types(self, survey):
        """Update measure types in the session according to changes in the
        tool.
        Specifically, if an `in_place_standard` measure is deleted in
        the tool, it disappears from the identification phase of the
        session unless we change its type to `in_place_custom`.
        """
        in_place_standard_measures = (
            Session.query(Risk, ActionPlan)
            .filter(Risk.id == ActionPlan.risk_id)
            .filter(Risk.session_id == self.id)
            .filter(ActionPlan.plan_type == "in_place_standard")
            .all()
        )
        for risk, measure in in_place_standard_measures:
            risk_zodb = survey.restrictedTraverse(risk.zodb_path.split("/"))
            solution_ids_zodb = [sol.id for sol in risk_zodb._solutions]
            if measure.solution_id not in solution_ids_zodb:
                # The measure is in the session but not in the tool. It has probably
                # been deleted. Keep it visible by making it a custom measure.
                measure.plan_type = "in_place_custom"
    def addChild(self, item):
        """Append ``item`` as a new top-level (depth 1) tree node and
        return it."""
        sqlsession = Session()
        query = (
            sqlsession.query(SurveyTreeItem.path)
            .filter(SurveyTreeItem.session_id == self.id)
            .filter(SurveyTreeItem.depth == 1)
            .order_by(SurveyTreeItem.path.desc())
        )
        last = query.first()
        if not last:
            index = 1
        else:
            index = int(last[0][-3:]) + 1
        item.session = self
        item.depth = 1
        item.path = "%03d" % index
        item.parent_id = None
        sqlsession.add(item)
        self.touch()
        return item
    def children(self, filter=None):
        """Query the top-level (depth 1) tree items of this session."""
        query = (
            Session.query(SurveyTreeItem)
            .filter(SurveyTreeItem.session_id == self.id)
            .filter(SurveyTreeItem.depth == 1)
        )
        if filter is not None:
            query = query.filter(filter)
        return query.order_by(SurveyTreeItem.path)
    def copySessionData(self, other):
        """Copy all user data from another session to this one."""
        session = Session()
        # Copy all tree data to the new session (skip_children and postponed)
        old_tree = orm.aliased(SurveyTreeItem, name="old_tree")
        in_old_tree = sql.and_(
            old_tree.session_id == other.id,
            SurveyTreeItem.zodb_path == old_tree.zodb_path,
            SurveyTreeItem.profile_index == old_tree.profile_index,
        )
        skip_children = sql.select([old_tree.skip_children], in_old_tree).limit(1)
        postponed = sql.select([old_tree.postponed], in_old_tree).limit(1)
        new_items = (
            session.query(SurveyTreeItem)
            .filter(SurveyTreeItem.session == self)
            .filter(sql.exists(sql.select([old_tree.id]).where(in_old_tree)))
        )
        new_items.update(
            {"skip_children": skip_children, "postponed": postponed},
            synchronize_session=False,
        )
        # Mandatory modules must have skip_children=False. It's possible that
        # the module was optional with skip_children=True and now after the
        # update it's mandatory. So we must check and correct.
        # In case a risk was marked as "always present", be sure its
        # identification gets set to 'no'
        preset_to_no = []
        survey = getSite()["client"].restrictedTraverse(self.zodb_path)
        for item in new_items.all():
            if item.type == "risk":
                if item.identification == "no":
                    preset_to_no.append(item.risk_id)
            elif item.type == "module":
                module = survey.restrictedTraverse(item.zodb_path.split("/"))
                if not module.optional:
                    item.skip_children = False
        # Copy all risk data to the new session
        # This triggers a "Only update via a single table query is currently
        # supported" error with SQLAlchemy 0.6.6
        # old_risk = orm.aliased(Risk.__table__, name='old_risk')
        # is_old_risk = sql.and_(in_old_tree, old_tree.id == old_risk.id)
        # identification = sql.select([old_risk.identification], is_old_risk)
        # new_risks = session.query(Risk)\
        #     .filter(Risk.session == self)\
        #     .filter(sql.exists(
        #         sql.select([SurveyTreeItem.id]).where(sql.and_(
        #             SurveyTreeItem.id == Risk.id,
        #             sql.exists([old_tree.id]).where(sql.and_(
        #                 in_old_tree, old_tree.type == 'risk'))))))
        # new_risks.update({'identification': identification},
        #     synchronize_session=False)
        skip_preset_to_no_clause = ""
        if len(preset_to_no):
            # Build a NOT IN clause from our own risk ids (not user input).
            skip_preset_to_no_clause = "old_risk.risk_id not in %s AND" % (
                str([str(x) for x in preset_to_no]).replace("[", "(").replace("]", ")")
            )
        statement = """\
        UPDATE RISK
        SET identification = old_risk.identification,
            frequency = old_risk.frequency,
            effect = old_risk.effect,
            probability = old_risk.probability,
            priority = old_risk.priority,
            existing_measures = old_risk.existing_measures,
            comment = old_risk.comment
        FROM risk AS old_risk JOIN tree AS old_tree ON old_tree.id=old_risk.id, tree
        WHERE tree.id=risk.id AND
              %(skip_preset_to_no_clause)s
              tree.session_id=%(new_sessionid)s AND
              old_tree.session_id=%(old_sessionid)s AND
              old_tree.zodb_path=tree.zodb_path AND
              old_tree.profile_index=tree.profile_index;
        """ % dict(  # noqa: E501
            old_sessionid=other.id,
            new_sessionid=self.id,
            skip_preset_to_no_clause=skip_preset_to_no_clause,
        )
        session.execute(statement)
        statement = """\
        INSERT INTO action_plan (risk_id, action_plan, prevention_plan, action,
                                 requirements, responsible, budget, plan_type,
                                 planning_start, planning_end,
                                 solution_id, used_in_training)
        SELECT new_tree.id,
               action_plan.action_plan,
               action_plan.prevention_plan,
               action_plan.action,
               action_plan.requirements,
               action_plan.responsible,
               action_plan.budget,
               action_plan.plan_type,
               action_plan.planning_start,
               action_plan.planning_end,
               action_plan.solution_id,
               action_plan.used_in_training
        FROM action_plan JOIN risk ON action_plan.risk_id=risk.id
                         JOIN tree ON tree.id=risk.id,
             tree AS new_tree
        WHERE tree.session_id=%(old_sessionid)d AND
              new_tree.session_id=%(new_sessionid)d AND
              tree.zodb_path=new_tree.zodb_path AND
              tree.profile_index=new_tree.profile_index;
        """ % {
            "old_sessionid": other.id,
            "new_sessionid": self.id,
        }
        session.execute(statement)
        # Copy over previous session metadata. Specifically, we don't want to
        # create a new modification timestamp, just because the underlying
        # survey was updated.
        statement = """\
        UPDATE session
        SET
            modified = old_session.modified,
            created = old_session.created,
            last_modifier_id = old_session.last_modifier_id
        FROM session as old_session
        WHERE
            old_session.id=%(old_sessionid)d AND
            session.id=%(new_sessionid)d
        """ % {
            "old_sessionid": other.id,
            "new_sessionid": self.id,
        }
        session.execute(statement)
        session.query(Company).filter(Company.session == other).update(
            {"session_id": self.id}, synchronize_session=False
        )
    @classmethod
    def get_account_filter(cls, account=None):
        """Filter only the sessions for the given account.
        :param account: True means current account.
        A falsish value means do not filter.
        Otherwise try to interpret the user input:
        a string or an int means the account_id should be that value,
        an object account will be used to extract the account id,
        from an iterable we will try to extract the account ids
        """
        # TODO: this is too complex
        include_organisation_members = False
        if account is True:
            include_organisation_members = True
            account = get_current_account()
        if isinstance(account, Account):
            account = account.id
        if not account:
            return False
        if not include_organisation_members and isinstance(account, (int, (str,))):
            return cls.account_id == account
        if include_organisation_members:
            account_ids = {account}
            # Add the owner id of the organisations where the account is member of
            owner_memberships = Session.query(OrganisationMembership.owner_id).filter(
                OrganisationMembership.member_id == account
            )
            account_ids.update(membership.owner_id for membership in owner_memberships)
        else:
            try:
                # This works when we pass an iterable of accounts or ids
                account_ids = {getattr(item, "id", item) for item in account}
            except TypeError:
                # this happens when account is not an iterable
                log.error("Cannot understand the account parameter: %r", account)
                raise
        account_ids = {
            item for item in account_ids if item and isinstance(item, (int, (str,)))
        }
        if not account_ids:
            return False
        if len(account_ids) == 1:
            for account_id in account_ids:
                return cls.get_account_filter(account_id)
        return cls.account_id.in_(account_ids)
    @classmethod
    def get_group_filter(cls, group=None):
        """Filter only the sessions for the given group.
        :param group: True means the current account's group.
        A falsish value means do not filter.
        Otherwise try to interpret the user input:
        a string or an int means the group_id should be that value,
        an object group will be used to extract the group id,
        and from an iterable we will try to extract the group ids
        """
        if group is True:
            group = getattr(get_current_account(), "group_id", None)
        if isinstance(group, Group):
            group = group.group_id
        if not group:
            return False
        if isinstance(group, (int, (str,))):
            return cls.group_id == group
        try:
            group_ids = {getattr(item, "group_id", item) for item in group}
        except TypeError:
            log.error("Cannot understand the group parameter: %r", group)
            raise
        group_ids = {
            item for item in group_ids if item and isinstance(item, (int, (str,)))
        }
        if not group_ids:
            return False
        if len(group_ids) == 1:
            for group_id in group_ids:
                return cls.get_group_filter(group_id)
        return cls.group_id.in_(group_ids)
    @classmethod
    def get_archived_filter(cls):
        """Clause keeping sessions that are NOT (yet) archived: the
        archival date is unset or still in the future."""
        return sql.or_(
            cls.archived >= localized_now(), cls.archived == None  # noqa: E711
        )
    @classmethod
    def _get_context_tools(cls, context):
        """Return the set of tools we can find under this context."""
        if not context:
            return set()
        # Check the path relative to the client folder
        if context.portal_type == "Plone Site":
            context = context.client
        if context.portal_type == "euphorie.survey":
            return {context}
        portal_type_filter = {
            "portal_type": [
                "euphorie.clientcountry",
                "euphorie.clientsector",
                "euphorie.survey",
            ]
        }
        surveys = set()
        def _add_survey(container):
            # Recursively collect surveys below countries/sectors.
            for obj in container.listFolderContents(portal_type_filter):
                if obj.portal_type == "euphorie.survey":
                    surveys.add(obj)
                else:
                    _add_survey(obj)
        _add_survey(context)
        return surveys
    @classmethod
    def get_context_filter(cls, context):
        """Filter sessions under this context using the zodb_path column."""
        surveys = cls._get_context_tools(context)
        if not surveys:
            return False
        return cls.zodb_path.in_(
            {
                safe_unicode("/".join(survey.getPhysicalPath()[-3:]))
                for survey in surveys
            }
        )
    @property
    def tool(self):
        """The survey (tool) content object this session belongs to."""
        client = api.portal.get().client
        return client.restrictedTraverse(str(self.zodb_path), None)
    @property
    def traversed_session(self):
        return self.tool.restrictedTraverse("++session++%s" % self.id)
    def absolute_url(self):
        """The URL for this session is based on the tool's URL.
        To it (if it can be fetched) we add a traverser with the session
        id.
        """
        client = api.portal.get().client
        tool = client.unrestrictedTraverse(self.zodb_path, None)
        if tool is None:
            raise ValueError("No tool found for session %s" % self.id)
        return f"{tool.absolute_url()}/++session++{self.id}"
    @property
    def country(self):
        # The first segment of zodb_path identifies the country.
        return str(self.zodb_path).split("/")[0]
    @property
    def completion_percentage(self):
        """Percentage of answered risks, ignoring skipped module subtrees."""
        module_query = (
            Session.query(SurveyTreeItem)
            .filter(SurveyTreeItem.session_id == self.id)
            .filter(SurveyTreeItem.type == "module")
        ).order_by(SurveyTreeItem.path)
        good_module_ids = set()
        bad_module_ids = set()
        for module in module_query:
            # Path ordering yields parents before children, so a skipped
            # module taints its whole subtree.
            if module.parent_id in bad_module_ids or module.skip_children:
                bad_module_ids.add(module.id)
            else:
                good_module_ids.add(module.id)
        if not good_module_ids:
            return 0
        total_risks_query = Session.query(Risk).filter(
            Risk.parent_id.in_(good_module_ids)
        )
        total = total_risks_query.count()
        if not total:
            return 0
        answered = float(
            total_risks_query.filter(Risk.identification != None).count()  # noqa: E711
        )
        completion_percentage = int(round(answered / total * 100.0))
        return completion_percentage
Account.sessions = orm.relationship(
    SurveySession,
    back_populates="account",
    foreign_keys=[SurveySession.account_id],
    cascade="all, delete-orphan",
)
class SessionEvent(BaseObject):
    """Data table to record events happening on sessions."""
    __tablename__ = "session_event"
    id = schema.Column(types.Integer(), primary_key=True, autoincrement=True)
    # When the event happened; defaults to the database clock.
    time = schema.Column(types.DateTime(), nullable=False, default=func.now())
    account_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Account.id, onupdate="CASCADE"),
        nullable=True,
    )
    account = orm.relationship(Account)
    session_id = schema.Column(
        types.Integer(),
        schema.ForeignKey("session.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    session = orm.relationship(SurveySession, back_populates="events")
    # Event type, e.g. the locking/validation actions queried by
    # SurveySession.last_locking_event / last_validation_event.
    action = schema.Column(types.Unicode(32))
    note = schema.Column(types.Unicode)
Account.session_events = orm.relationship(SessionEvent, back_populates="account")
SurveySession.events = orm.relationship(
    SessionEvent, back_populates="session", cascade="all,delete-orphan"
)
class SessionRedirect(BaseObject):
    """Mapping of old deleted sessions to their new rebuilt counterparts"""
    __tablename__ = "session_redirect"
    # Each old session id maps to exactly one new session id.
    old_session_id = schema.Column(types.Integer(), primary_key=True, nullable=False)
    new_session_id = schema.Column(types.Integer(), nullable=False)
class Company(BaseObject):
    """Information about a company."""
    __tablename__ = "company"
    id = schema.Column(types.Integer(), primary_key=True, autoincrement=True)
    session_id = schema.Column(
        types.Integer(),
        schema.ForeignKey("session.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    session = orm.relationship(SurveySession, back_populates="company")
    country = schema.Column(types.String(3))
    # Enumerated survey answers; ``None`` means "not answered".
    employees = schema.Column(Enum([None, "1-9", "10-49", "50-249", "250+"]))
    conductor = schema.Column(Enum([None, "staff", "third-party", "both"]))
    referer = schema.Column(
        Enum(
            [
                None,
                "employers-organisation",
                "trade-union",
                "national-public-institution",
                "eu-institution",
                "health-safety-experts",
                "other",
            ]
        )
    )
    workers_participated = schema.Column(types.Boolean())
    needs_met = schema.Column(types.Boolean())
    recommend_tool = schema.Column(types.Boolean())
    timestamp = schema.Column(types.DateTime(), nullable=True)
SurveySession.company = orm.relationship(
    Company, back_populates="session", cascade="all,delete-orphan", uselist=False
)
class Module(SurveyTreeItem):
    """A module.
    This is a dummy object needed to be able to put modules in the
    survey tree.
    """
    __tablename__ = "module"
    __mapper_args__ = dict(polymorphic_identity="module")
    # Joined-table inheritance: shares its primary key with the tree row.
    sql_module_id = schema.Column(
        "id",
        types.Integer(),
        schema.ForeignKey(SurveyTreeItem.id, onupdate="CASCADE", ondelete="CASCADE"),
        primary_key=True,
    )
    module_id = schema.Column(types.String(16), nullable=False)
    solution_direction = schema.Column(types.Boolean(), default=False)
class Risk(SurveyTreeItem):
    """Answer to risk.

    Joined-table inheritance row extending SurveyTreeItem with the
    user's answers and evaluation data for a single risk.
    """

    __tablename__ = "risk"
    __mapper_args__ = dict(polymorphic_identity="risk")

    # Shares its primary key with the underlying tree row.
    sql_risk_id = schema.Column(
        "id",
        types.Integer(),
        schema.ForeignKey(SurveyTreeItem.id, onupdate="CASCADE", ondelete="CASCADE"),
        primary_key=True,
    )
    risk_id = schema.Column(types.String(16), nullable=True)
    risk_type = schema.Column(
        Enum(["risk", "policy", "top5"]), default="risk", nullable=False, index=True
    )
    #: Skip-evaluation flag. This is only used to indicate if the sector
    #: set the evaluation method to `fixed`, not for policy behaviour
    #: such as not evaluation top-5 risks. That policy behaviour is
    #: handled via the question_filter on client views so it can be modified
    #: in custom deployments.
    skip_evaluation = schema.Column(types.Boolean(), default=False, nullable=False)
    is_custom_risk = schema.Column(types.Boolean(), default=False, nullable=False)
    identification = schema.Column(Enum([None, "yes", "no", "n/a"]))
    frequency = schema.Column(types.Integer())
    effect = schema.Column(types.Integer())
    probability = schema.Column(types.Integer())
    priority = schema.Column(Enum([None, "low", "medium", "high"]))
    comment = schema.Column(types.UnicodeText())
    existing_measures = schema.Column(types.UnicodeText())
    training_notes = schema.Column(types.UnicodeText())
    custom_description = schema.Column(types.UnicodeText())
    image_data = schema.Column(types.LargeBinary())
    image_data_scaled = schema.Column(types.LargeBinary())
    image_filename = schema.Column(types.UnicodeText())

    @memoize
    def measures_of_type(self, plan_type):
        """Return this risk's action plans of the given ``plan_type``,
        ordered by id (results are memoized per plan_type)."""
        # Fix: the original wrapped only the first condition in
        # ``sql.and_`` and passed the plan_type clause as a second
        # positional ``filter()`` argument -- a misplaced parenthesis.
        # Both conditions now sit in one conjunction; the generated SQL
        # is equivalent because filter() ANDs its arguments anyway.
        query = (
            Session.query(ActionPlan)
            .filter(
                sql.and_(
                    ActionPlan.risk_id == self.id,
                    ActionPlan.plan_type == plan_type,
                )
            )
            .order_by(ActionPlan.id)
        )
        return query.all()

    @property
    def standard_measures(self):
        """Measures with plan_type "measure_standard"."""
        return self.measures_of_type("measure_standard")

    @property
    def custom_measures(self):
        """Measures with plan_type "measure_custom"."""
        return self.measures_of_type("measure_custom")

    @property
    def in_place_standard_measures(self):
        """Measures with plan_type "in_place_standard"."""
        return self.measures_of_type("in_place_standard")

    @property
    def in_place_custom_measures(self):
        """Measures with plan_type "in_place_custom"."""
        return self.measures_of_type("in_place_custom")
class ActionPlan(BaseObject):
    """Action plans for a known risk."""
    __tablename__ = "action_plan"
    id = schema.Column(types.Integer(), primary_key=True, autoincrement=True)
    risk_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Risk.id, onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    action_plan = schema.Column(types.UnicodeText())
    prevention_plan = schema.Column(types.UnicodeText())
    # The column "action" is the synthesis of "action_plan" and "prevention_plan"
    action = schema.Column(types.UnicodeText())
    requirements = schema.Column(types.UnicodeText())
    responsible = schema.Column(types.Unicode(256))
    budget = schema.Column(types.Integer())
    planning_start = schema.Column(types.Date())
    planning_end = schema.Column(types.Date())
    reference = schema.Column(types.Text())
    # Distinguishes planned vs already-in-place measures, and standard
    # (from the tool) vs custom (user-entered) ones -- see
    # SurveySession.update_measure_types and Risk.measures_of_type.
    plan_type = schema.Column(
        Enum(
            [
                "measure_custom",
                "measure_standard",
                "in_place_standard",
                "in_place_custom",
            ]
        ),
        nullable=False,
        index=True,
        default="measure_custom",
    )
    solution_id = schema.Column(types.Unicode(20))
    used_in_training = schema.Column(
        types.Boolean(),
        default=True,
        index=True,
    )
    risk = orm.relationship(Risk, back_populates="action_plans")
Risk.action_plans = orm.relationship(
    ActionPlan, back_populates="risk", cascade="all, delete-orphan"
)
class Training(BaseObject):
    """Data table to record trainings."""

    __tablename__ = "training"
    id = schema.Column(types.Integer(), primary_key=True, autoincrement=True)
    # When the training happened; defaults to the DB clock at insert time.
    time = schema.Column(types.DateTime(), nullable=True, default=func.now())
    account_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Account.id, onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    account = orm.relationship(Account, back_populates="trainings")
    session_id = schema.Column(
        types.Integer(),
        schema.ForeignKey("session.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    session = orm.relationship(
        "SurveySession",
        back_populates="trainings",
    )
    # Given answers stored as text; the "[]" default suggests a JSON-encoded
    # list — confirm against the code that reads/writes this column.
    answers = schema.Column(types.Unicode, default="[]")
    status = schema.Column(types.Unicode)
# Reverse sides of the Training relationships declared above; orphan
# deletion removes trainings with their account / session.
Account.trainings = orm.relationship(
    Training, back_populates="account", cascade="all, delete-orphan"
)
SurveySession.trainings = orm.relationship(
    Training, back_populates="session", cascade="all, delete-orphan"
)
class Organisation(BaseObject):
    """A table to store some data about an organisation."""

    __tablename__ = "organisation"
    organisation_id = schema.Column(
        types.Integer(),
        primary_key=True,
        autoincrement=True,
    )
    # Account that owns this record (one-to-one; see Account.organisation).
    owner_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Account.id, onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    title = schema.Column(types.UnicodeText())
    # Organisation image: original bytes, a scaled copy, and the file name.
    image_data = schema.Column(types.LargeBinary())
    image_data_scaled = schema.Column(types.LargeBinary())
    image_filename = schema.Column(types.UnicodeText())
    owner = orm.relationship(Account, back_populates="organisation")
# One-to-one: each account owns at most one Organisation record.
Account.organisation = orm.relationship(
    Organisation, back_populates="owner", cascade="all, delete-orphan", uselist=False
)
class OrganisationMembership(BaseObject):
    """This table wants to mimic the concept of an organisation for Euphorie.

    The goal is to share permissions to work on sessions from another
    user.
    """

    __tablename__ = "organisation_membership"
    # NOTE(review): this primary key identifies the membership row itself,
    # although it is named organisation_id (possibly copied from
    # Organisation) — confirm before relying on the name.
    organisation_id = schema.Column(
        types.Integer(),
        primary_key=True,
        autoincrement=True,
    )
    # Account granting access ("owner") and account receiving it ("member").
    owner_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Account.id, onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    member_id = schema.Column(
        types.Integer(),
        schema.ForeignKey(Account.id, onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    member_role = schema.Column(types.UnicodeText())
_instrumented = False
if not _instrumented:
    # Wire the plain model classes to SQLAlchemy exactly once: register each
    # class on the shared metadata and then resolve all mapper relationships.
    metadata._decl_registry = {}
    for cls in [
        Consultancy,
        SurveyTreeItem,
        SurveySession,
        SessionEvent,
        SessionRedirect,
        Module,
        Risk,
        ActionPlan,
        Group,
        Account,
        AccountChangeRequest,
        Company,
        Training,
        Organisation,
        OrganisationMembership,
    ]:
        instrument_declarative(cls, metadata._decl_registry, metadata)
    orm.configure_mappers()
    _instrumented = True
# Composite indexes backing the survey-tree traversal queries below.
schema.Index("tree_session_path", SurveyTreeItem.session_id, SurveyTreeItem.path)
schema.Index(
    "tree_zodb_path",
    SurveyTreeItem.session_id,
    SurveyTreeItem.profile_index,
    SurveyTreeItem.zodb_path,
)
def _SKIPPED_PARENTS_factory():
    """EXISTS clause that matches tree items having an ancestor (same
    session, shallower depth, path prefix) with ``skip_children`` set."""
    # XXX This can be optimized by doing short-circuit on parent.type!=module
    ancestor = orm.aliased(SurveyTreeItem)
    ancestor_skips = sql.and_(
        ancestor.session_id == SurveyTreeItem.session_id,
        SurveyTreeItem.depth > ancestor.depth,
        SurveyTreeItem.path.like(ancestor.path + "%"),
        ancestor.skip_children == True,  # noqa: E712
    )
    return sql.exists().where(ancestor_skips)
SKIPPED_PARENTS = _SKIPPED_PARENTS_factory()
# Exclude user-defined ("custom") risks from tree queries.
NO_CUSTOM_RISKS_FILTER = sql.not_(
    sql.and_(
        SurveyTreeItem.type == "risk",
        sql.exists(
            sql.select([Risk.sql_risk_id]).where(
                sql.and_(
                    Risk.sql_risk_id == SurveyTreeItem.id,
                    Risk.is_custom_risk == True,  # noqa: E712
                )
            )
        ),
    )
)
# Everything except modules that have no description text.
RISK_OR_MODULE_WITH_DESCRIPTION_FILTER = sql.or_(
    SurveyTreeItem.type != "module", SurveyTreeItem.has_description
)
# Used by tno.euphorie
def _MODULE_WITH_RISK_FILTER_factory():
    """Modules (with children enabled) that contain at least one descendant
    risk answered as present (identification == "no")."""
    child_node = orm.aliased(SurveyTreeItem)
    return sql.and_(
        SurveyTreeItem.type == "module",
        SurveyTreeItem.skip_children == False,  # noqa: E712
        sql.exists(
            sql.select([child_node.id]).where(
                sql.and_(
                    child_node.session_id == SurveyTreeItem.session_id,
                    child_node.id == Risk.sql_risk_id,
                    child_node.type == "risk",
                    Risk.identification == "no",
                    # Correlate the child to the outer item via depth + path.
                    child_node.depth > SurveyTreeItem.depth,
                    child_node.path.like(SurveyTreeItem.path + "%"),
                )
            )
        ),
    )
MODULE_WITH_RISK_FILTER = _MODULE_WITH_RISK_FILTER_factory()
def _MODULE_WITH_RISK_OR_TOP5_FILTER_factory():
    """Modules (with children enabled) containing at least one descendant
    risk that is present ("no") or classified top-5."""
    child_node = orm.aliased(SurveyTreeItem)
    return sql.and_(
        SurveyTreeItem.type == "module",
        SurveyTreeItem.skip_children == False,  # noqa: E712
        sql.exists(
            sql.select([child_node.id]).where(
                sql.and_(
                    child_node.session_id == SurveyTreeItem.session_id,
                    child_node.id == Risk.sql_risk_id,
                    child_node.type == "risk",
                    sql.or_(Risk.identification == "no", Risk.risk_type == "top5"),
                    child_node.depth > SurveyTreeItem.depth,
                    child_node.path.like(SurveyTreeItem.path + "%"),
                )
            )
        ),
    )
MODULE_WITH_RISK_OR_TOP5_FILTER = _MODULE_WITH_RISK_OR_TOP5_FILTER_factory()
def _MODULE_WITH_RISK_TOP5_TNO_FILTER_factory():
    """Modules (with children enabled) containing at least one risk that is
    present ("no"), or a top-5 risk that is unanswered or not marked
    "n/a"/"yes"."""
    child_node = orm.aliased(SurveyTreeItem)
    return sql.and_(
        SurveyTreeItem.type == "module",
        SurveyTreeItem.skip_children == False,  # noqa: E712
        sql.exists(
            sql.select([child_node.id]).where(
                sql.and_(
                    child_node.session_id == SurveyTreeItem.session_id,
                    child_node.id == Risk.sql_risk_id,
                    child_node.type == "risk",
                    sql.or_(
                        Risk.identification == "no",
                        sql.and_(
                            Risk.risk_type == "top5",
                            sql.or_(
                                sql.not_(Risk.identification.in_(["n/a", "yes"])),
                                # Bugfix: 'Risk.identification is None' was a
                                # Python identity test evaluating to the
                                # constant False at import time instead of an
                                # SQL "IS NULL" clause (compare the correct
                                # RISK_PRESENT_FILTER_TOP5_TNO_FILTER below).
                                Risk.identification == None,  # noqa: E711
                            ),
                        ),
                    ),
                    child_node.depth > SurveyTreeItem.depth,
                    child_node.path.like(SurveyTreeItem.path + "%"),
                )
            )
        ),
    )
# Used by tno.euphorie
MODULE_WITH_RISK_TOP5_TNO_FILTER = _MODULE_WITH_RISK_TOP5_TNO_FILTER_factory()
def _MODULE_WITH_RISK_NO_TOP5_NO_POLICY_DO_EVALUTE_FILTER_factory():
    """Modules (with children enabled) containing at least one present risk
    that still needs evaluation (not top-5, not policy, evaluation not
    skipped)."""
    child_node = orm.aliased(SurveyTreeItem)
    return sql.and_(
        SurveyTreeItem.type == "module",
        SurveyTreeItem.skip_children == False,  # noqa: E712
        sql.exists(
            sql.select([child_node.id]).where(
                sql.and_(
                    child_node.session_id == SurveyTreeItem.session_id,
                    child_node.id == Risk.sql_risk_id,
                    child_node.type == "risk",
                    sql.not_(Risk.risk_type.in_(["top5", "policy"])),
                    # Bugfix: 'Risk.skip_evaluation is True' was a Python
                    # identity test (always False here), not an SQL clause
                    # (compare RISK_PRESENT_NO_TOP5_NO_POLICY_DO_EVALUTE_FILTER).
                    sql.not_(Risk.skip_evaluation == True),  # noqa: E712
                    Risk.identification == "no",
                    child_node.depth > SurveyTreeItem.depth,
                    child_node.path.like(SurveyTreeItem.path + "%"),
                )
            )
        ),
    )
MODULE_WITH_RISK_NO_TOP5_NO_POLICY_DO_EVALUTE_FILTER = (
    _MODULE_WITH_RISK_NO_TOP5_NO_POLICY_DO_EVALUTE_FILTER_factory()
)
# Used by tno.euphorie
# Risks answered as present (identification == "no").
RISK_PRESENT_FILTER = sql.and_(
    SurveyTreeItem.type == "risk",
    sql.exists(
        sql.select([Risk.sql_risk_id]).where(
            sql.and_(Risk.sql_risk_id == SurveyTreeItem.id, Risk.identification == "no")
        )
    ),
)
# Present risks, or top-5 risks that are unanswered or not marked "n/a"/"yes".
RISK_PRESENT_FILTER_TOP5_TNO_FILTER = sql.and_(
    SurveyTreeItem.type == "risk",
    sql.exists(
        sql.select([Risk.sql_risk_id]).where(
            sql.and_(
                Risk.sql_risk_id == SurveyTreeItem.id,
                sql.or_(
                    Risk.identification == "no",
                    sql.and_(
                        Risk.risk_type == "top5",
                        sql.or_(
                            sql.not_(Risk.identification.in_(["n/a", "yes"])),
                            Risk.identification == None,  # noqa: E711
                        ),
                    ),
                ),
            )
        )
    ),
)
def _RISK_PRESENT_OR_TOP5_FILTER_factory():
    """Risks that are present ("no") or classified top-5."""
    risk_alias = orm.aliased(Risk)
    present_or_top5 = sql.or_(
        risk_alias.identification == "no", risk_alias.risk_type == "top5"
    )
    return sql.and_(
        SurveyTreeItem.type == "risk",
        sql.exists(
            sql.select([risk_alias.sql_risk_id]).where(
                sql.and_(risk_alias.sql_risk_id == SurveyTreeItem.id, present_or_top5)
            )
        ),
    )
RISK_PRESENT_OR_TOP5_FILTER = _RISK_PRESENT_OR_TOP5_FILTER_factory()
# Present risks still needing evaluation (not top-5/policy, not skipped).
RISK_PRESENT_NO_TOP5_NO_POLICY_DO_EVALUTE_FILTER = sql.and_(
    SurveyTreeItem.type == "risk",
    sql.exists(
        sql.select([Risk.sql_risk_id]).where(
            sql.and_(
                Risk.sql_risk_id == SurveyTreeItem.id,
                sql.not_(Risk.risk_type.in_(["top5", "policy"])),
                sql.not_(Risk.skip_evaluation == True),  # noqa: E712
                Risk.identification == "no",
            )
        )
    ),
)
# Items shown in the evaluation phase: qualifying modules or risks.
EVALUATION_FILTER = sql.or_(
    MODULE_WITH_RISK_NO_TOP5_NO_POLICY_DO_EVALUTE_FILTER,
    RISK_PRESENT_NO_TOP5_NO_POLICY_DO_EVALUTE_FILTER,
)
# Items shown in the action-plan phase: qualifying modules or risks.
ACTION_PLAN_FILTER = sql.or_(
    MODULE_WITH_RISK_OR_TOP5_FILTER,
    RISK_PRESENT_OR_TOP5_FILTER,
)
def _SKIPPED_MODULE_factory():
    """Clause matching modules for which a tree item with ``skip_children``
    exists in the same session.

    Bugfix: the original built the EXISTS clause but never returned it, so
    SKIPPED_MODULE was silently None.
    NOTE(review): unlike the other factories, child_node is not correlated to
    the outer item via depth/path here — confirm that is intended.
    """
    child_node = orm.aliased(SurveyTreeItem)
    return sql.exists().where(
        sql.and_(
            SurveyTreeItem.type == "module",
            child_node.session_id == SurveyTreeItem.session_id,
            child_node.skip_children == True,  # noqa: E712
        )
    )
SKIPPED_MODULE = _SKIPPED_MODULE_factory()
# Risks that have not been answered yet (identification IS NULL).
UNANSWERED_RISKS_FILTER = sql.and_(
    SurveyTreeItem.type == "risk",
    sql.exists(
        sql.select([Risk.sql_risk_id]).where(
            sql.and_(
                Risk.sql_risk_id == SurveyTreeItem.id,
                Risk.identification == None,  # noqa: E711
            )
        )
    ),
)
def _MODULE_WITH_UNANSWERED_RISKS_FILTER_factory():
    """Modules (with children enabled) containing at least one descendant
    risk that has not been answered yet (identification IS NULL)."""
    child_node = orm.aliased(SurveyTreeItem)
    return sql.and_(
        SurveyTreeItem.type == "module",
        SurveyTreeItem.skip_children == False,  # noqa: E712
        sql.exists(
            sql.select([child_node.id]).where(
                sql.and_(
                    child_node.session_id == SurveyTreeItem.session_id,
                    child_node.id == Risk.sql_risk_id,
                    child_node.type == "risk",
                    # Bugfix: 'Risk.identification is None' was a Python
                    # identity test (constant False), not an SQL "IS NULL"
                    # clause (compare UNANSWERED_RISKS_FILTER above).
                    Risk.identification == None,  # noqa: E711
                    child_node.depth > SurveyTreeItem.depth,
                    child_node.path.like(SurveyTreeItem.path + "%"),
                )
            )
        ),
    )
MODULE_WITH_UNANSWERED_RISKS_FILTER = _MODULE_WITH_UNANSWERED_RISKS_FILTER_factory()
def _MODULE_WITH_RISKS_NOT_PRESENT_FILTER_factory():
    """Modules (with children enabled) containing at least one descendant
    risk answered as not present (identification == "yes")."""
    descendant = orm.aliased(SurveyTreeItem)
    risk_not_present_below = sql.and_(
        descendant.session_id == SurveyTreeItem.session_id,
        descendant.id == Risk.sql_risk_id,
        descendant.type == "risk",
        Risk.identification == "yes",
        descendant.depth > SurveyTreeItem.depth,
        descendant.path.like(SurveyTreeItem.path + "%"),
    )
    return sql.and_(
        SurveyTreeItem.type == "module",
        SurveyTreeItem.skip_children == False,  # noqa: E712
        sql.exists(sql.select([descendant.id]).where(risk_not_present_below)),
    )
MODULE_WITH_RISKS_NOT_PRESENT_FILTER = _MODULE_WITH_RISKS_NOT_PRESENT_FILTER_factory()
# Risks answered as not present (identification == "yes").
RISK_NOT_PRESENT_FILTER = sql.and_(
    SurveyTreeItem.type == "risk",
    sql.exists(
        sql.select([Risk.sql_risk_id]).where(
            sql.and_(
                Risk.sql_risk_id == SurveyTreeItem.id, Risk.identification == "yes"
            )
        )
    ),
)
def get_current_account():
    """XXX this would be better placed in an api module, but we need to avoid
    circular dependencies.

    :return: The current Account instance if a user can be found,
             otherwise None
    """
    user_id = api.user.get_current().getId()
    try:
        return Session.query(Account).filter(Account.id == user_id).first()
    except Exception:
        # Lazy %-interpolation keeps the id in the same log record; the
        # original emitted a dangling "username:" line and a separate id line.
        log.warning("Unable to fetch account for username: %s", user_id)
        return None
class DefaultView(BrowserView):
    """Default @@index_html view for the objects in the model."""

    def __call__(self):
        """Somebody called the default view for this object:
        we do not want this to happen so we display a message and
        redirect the user to the session start page
        """
        api.portal.show_message(
            "Wrong URL: %s" % self.request.getURL(), self.request, "warning"
        )
        # Resolve the session the user is working on and send them to its start page.
        webhelpers = api.content.get_view("webhelpers", self.context, self.request)
        target = webhelpers.traversed_session.absolute_url() + "/@@start"
        return self.request.response.redirect(target)
@ram.cache(lambda _: "show_timezone_cache_key")
def show_timezone():
    """Return the database server's configured timezone as a pytz timezone.

    Cached with a constant key: the DB timezone is assumed not to change
    while the process runs. "SHOW TIMEZONE" is PostgreSQL syntax.
    """
    timezone = Session.execute("SHOW TIMEZONE").first()
    return pytz.timezone(timezone[0])
# Explicit public API of this module.
__all__ = [
    "SurveySession",
    "Module",
    "Risk",
    "ActionPlan",
    "SKIPPED_PARENTS",
    "MODULE_WITH_RISK_FILTER",
    "MODULE_WITH_RISK_TOP5_TNO_FILTER",
    "RISK_PRESENT_FILTER",
    "RISK_PRESENT_FILTER_TOP5_TNO_FILTER",
    "get_current_account",
]
/Muntjac-1.1.2.tar.gz/Muntjac-1.1.2/muntjac/data/util/list_set.py | class ListSet(list):
"""ListSet is an internal Muntjac class which implements a combination of
a List and a Set. The main purpose of this class is to provide a list with
a fast L{contains} method. Each inserted object must by unique (as
specified by L{equals}). The L{set} method allows duplicates because of
the way L{sort} works.
This class is subject to change and should not be used outside Muntjac
core.
"""
def __init__(self, *args):
self._itemSet = None
# Contains a map from an element to the number of duplicates it has.
# Used to temporarily allow duplicates in the list.
self._duplicates = dict()
nargs = len(args)
if nargs == 0:
super(ListSet, self).__init__()
self._itemSet = set()
elif nargs == 1:
if isinstance(args[0], int):
initialCapacity, = args
super(ListSet, self).__init__()#initialCapacity)
self._itemSet = set()#initialCapacity)
else:
c, = args
super(ListSet, self).__init__(c)
self._itemSet = set()#len(c))
self._itemSet = self._itemSet.union(c)
else:
raise ValueError, 'too many arguments'
# Delegate contains operations to the set
def contains(self, o):
return o in self._itemSet
def __contains__(self, item):
return self.contains(item)
def containsAll(self, c):
for cc in c:
if cc not in self._itemSet:
return False
else:
return True
def append(self, val):
return self.add(val)
def insert(self, idx, val):
return self.add(idx, val)
# Methods for updating the set when the list is updated.
def add(self, *args):
"""Works as list.append or list.insert but returns
immediately if the element is already in the ListSet.
"""
nargs = len(args)
if nargs == 1:
e, = args
if self.contains(e):
# Duplicates are not allowed
return False
if not super(ListSet, self).__contains__(e):
super(ListSet, self).append(e)
self._itemSet.add(e)
return True
else:
return False
elif nargs == 2:
index, element = args
if self.contains(element):
# Duplicates are not allowed
return
super(ListSet, self).insert(index, element)
self._itemSet.add(element)
else:
raise ValueError, 'invalid number of arguments'
def extend(self, iterable):
return self.addAll(iterable)
def addAll(self, *args):
nargs = len(args)
if nargs == 1:
c, = args
modified = False
for e in c:
if self.contains(e):
continue
if self.add(e):
self._itemSet.add(e)
modified = True
return modified
elif nargs == 2:
index, c = args
#self.ensureCapacity(len(self) + len(c))
modified = False
for e in c:
if self.contains(e):
continue
self.add(index, e)
index += 1
self._itemSet.add(e)
modified = True
return modified
else:
raise ValueError, 'invalid number of arguments'
def clear(self):
del self[:]
self._itemSet.clear()
def index(self, val):
return self.indexOf(val)
def indexOf(self, o):
if not self.contains(o):
return -1
return super(ListSet, self).index(o)
def lastIndexOf(self, o):
if not self.contains(o):
return -1
return self[::-1].index(o)
def remove(self, o):
if isinstance(o, int):
index = o
e = super(ListSet, self).pop(index)
if e is not None:
self._itemSet.remove(e)
return e
else:
if super(ListSet, self).remove(o):
self._itemSet.remove(o)
return True
else:
return False
def removeRange(self, fromIndex, toIndex):
toRemove = set()
for idx in range(fromIndex, toIndex):
toRemove.add(self[idx])
del self[fromIndex:toIndex]
for r in toRemove:
self._itemSet.remove(r)
def set(self, index, element): #@PydevCodeAnalysisIgnore
if element in self:
# Element already exist in the list
if self[index] == element:
# At the same position, nothing to be done
return element
else:
# Adding at another position. We assume this is a sort
# operation and temporarily allow it.
# We could just remove (null) the old element and keep the list
# unique. This would require finding the index of the old
# element (indexOf(element)) which is not a fast operation in a
# list. So we instead allow duplicates temporarily.
self.addDuplicate(element)
old = self[index] = element
self.removeFromSet(old)
self._itemSet.add(element)
return old
def removeFromSet(self, e):
"""Removes "e" from the set if it no longer exists in the list.
"""
dupl = self._duplicates.get(e)
if dupl is not None:
# A duplicate was present so we only decrement the duplicate count
# and continue
if dupl == 1:
# This is what always should happen. A sort sets the items one
# by one, temporarily breaking the uniqueness requirement.
del self._duplicates[e]
else:
self._duplicates[e] = dupl - 1
else:
# The "old" value is no longer in the list.
self._itemSet.remove(e)
def addDuplicate(self, element):
"""Marks the "element" can be found more than once from the list.
Allowed in L{set} to make sorting work.
"""
nr = self._duplicates.get(element)
if nr is None:
nr = 1
else:
nr += 1
# Store the number of duplicates of this element so we know later on if
# we should remove an element from the set or if it was a duplicate (in
# removeFromSet)
self._duplicates[element] = nr
def clone(self):
v = ListSet(self[:])
v._itemSet = set(self._itemSet)
return v | PypiClean |
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/perturbSeqAnalysis.py | import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import export
import unique
import traceback
""" Intersecting Coordinate Files """
def cleanUpLine(line):
    """Strip line-ending artefacts and double quotes from a text line.

    Uses str.replace instead of the removed-in-Python-3 ``string`` module
    functions, so the helper works under both Python 2 and 3.
    """
    data = line.replace('\n', '')
    data = data.replace('\\c', '')  # literal backslash-c artefact, as in the original
    data = data.replace('\r', '')
    data = data.replace('"', '')
    return data
def importLookupTable(fn):
    """ Import a gRNA to valid tag lookup table """
    # Returns a list of (gRNA, tag) tuples, one per tab-separated input line.
    lookup_table = []
    for line in open(fn,'rU').xreadlines():  # Python-2 file iteration ('rU', xreadlines)
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        gRNA,tag = t
        lookup_table.append((gRNA,tag))
    return lookup_table
def importCountMatrix(fn,mask=False):
    """ Import a count matrix """
    # Maps cell barcode -> column header for barcodes that are confidently
    # assigned to exactly one column (one dominant ratio, enough counts).
    classification = {}
    firstRow = True
    for line in open(fn,'rU').xreadlines():  # Python-2 file iteration
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if firstRow:
            headers = t[1:]
            firstRow = False
        else:
            barcode = t[0]
            values = map(int,t[1:])
            if mask:
                # NOTE(review): when mask=True the totals/ratios drop the
                # first two columns but `headers` below is NOT shifted, so
                # `headers[index]` would be off by two — confirm (mask is
                # never True from __main__).
                sum_counts = sum(values[2:])
            else:
                sum_counts = sum(values)
            def threshold(val):
                # 1 when a column holds more than 30% of the barcode's counts
                if val>0.3: return 1
                else: return 0
            if sum_counts>0:
                ratios = map(lambda x: (1.000*x)/sum_counts, values)
                if mask:
                    original_ratios = ratios
                    ratios = ratios[2:] ### mask the first two controls which are saturating
                else:
                    original_ratios = ratios
                hits = map(lambda x: threshold(x), ratios)
                hits = sum(hits)
                # Require at least 20 counts and exactly one dominant column
                if sum_counts>20 and hits == 1:
                    index=0
                    for ratio in ratios:
                        if ratio>0.3:
                            header = headers[index]
                        index+=1
                    classification[barcode] = header
    print len(classification),fn
    return classification
def exportGuideToTags(lookup_table,gRNA_barcode,tag_barcode,output):
    """Write one line per cell barcode that has both a gRNA and a tag
    assignment, when the (gRNA, tag) pair appears in the lookup table.

    Output format (tab separated): barcode, tag__gRNA, tag__gRNA.

    :param lookup_table: list of valid (gRNA, tag) tuples
    :param gRNA_barcode: dict barcode -> gRNA name
    :param tag_barcode: dict barcode -> tag name
    :param output: path of the file to write
    """
    # 'with' guarantees the handle is closed even on error (the original
    # leaked it if a lookup raised mid-loop).
    with open(output,'w') as export_object:
        for barcode in gRNA_barcode:
            gRNA = gRNA_barcode[barcode]
            if barcode in tag_barcode:
                tag = tag_barcode[barcode]
                if (gRNA,tag) in lookup_table:
                    uid = tag+'__'+gRNA
                    export_object.write(barcode+'\t'+uid+'\t'+uid+'\n')
if __name__ == '__main__':
    ################ Comand-line arguments ################
    import getopt
    if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
        print 'WARNING!!!! Too commands supplied.'
    else:
        # NOTE(review): '--species' is accepted by getopt but never handled
        # in the loop below — confirm whether it should set a variable.
        options, remainder = getopt.getopt(sys.argv[1:],'', ['species=','gRNA=', 'tag=', 'lookup=','output='])
        #print sys.argv[1:]
        for opt, arg in options:
            if opt == '--gRNA':
                gRNA = arg
            elif opt == '--tag':
                tag = arg
            elif opt == '--lookup':
                lookup = arg
            elif opt == '--output':
                output = arg
    # Build barcode->gRNA and barcode->tag maps, then write matched pairs.
    lookup_table = importLookupTable(lookup)
    gRNA_barcode = importCountMatrix(gRNA)
    tag_barcode = importCountMatrix(tag)
    exportGuideToTags(lookup_table,gRNA_barcode,tag_barcode,output)
/DRSlib-DavidRodriguezSoaresCUI-0.8.0.tar.gz/DRSlib-DavidRodriguezSoaresCUI-0.8.0/src/DRSlib/path_tools.py | # pylint: disable=too-few-public-methods, import-error, wrong-import-order, broad-except
"""
Path tools
==========
Easy to use tool to collect files matching a pattern.
Note: both class and function versions should be euivalent, both kept
just in case. Class may be usefull for repeated calls to `collect` method.
"""
from os import popen
from pathlib import Path
from send2trash import send2trash
from shutil import copy2
from typing import Union, List, Optional, Tuple
import logging
import re
import sys
from .execute import execute
from .os_detect import Os
from .utils import assertTrue
LOG = logging.getLogger(__file__)
# Characters rejected by common filesystems in file/directory names.
MAKE_FS_SAFE_PATTERN = re.compile(pattern=r'[\\/*?:"<>|]')

try:
    import win32api
except ImportError:
    # win32api is optional; it is only needed on Windows
    # (see windows_list_logical_drives).
    current_os = Os()
    if current_os.windows:
        LOG.error("Could not load win32api !")
class FileCollector:
    """Easy to use tool to collect files matching a pattern (recursive or not),
    using pathlib.glob.

    Reasoning for making it a class: Making cohexist an initial
    check/processing on root with a recursive main function was not
    straightforward. I did it anyway, so feel free to use the function
    alternative.
    """

    def __init__(self, root: Path) -> None:
        assertTrue(root.is_dir(), "Root dir must exist: '{}'", root)
        # NOTE(review): Path.resolve() returns a new path and does not mutate
        # in place, so this call has no effect — confirm whether
        # 'root = root.resolve()' was intended before changing it.
        root.resolve()
        self.root = root
        self.log = logging.getLogger(__file__)
        self.log.debug("root=%s", root)

    def collect(self, pattern: str = "**/*.*") -> List[Path]:
        """Collect files matching given pattern(s)"""
        # 11/11/2020 BUGFIX : was collecting files in trash like a cyber racoon
        # (The dead 'files = []' pre-assignment from the original was removed.)
        files = [
            item.resolve()
            for item in self.root.glob(pattern)
            if item.is_file() and ("$RECYCLE.BIN" not in item.parts)
        ]
        self.log.debug("\t'%s': Found %s files in %s", pattern, len(files), self.root)
        return files
def file_collector(root: Path, pattern: str = "**/*.*") -> List[Path]:
    """Easy to use tool to collect files matching a pattern (recursive or
    not), using pathlib.glob. Collect files matching given pattern(s)."""
    assertTrue(root.is_dir(), "Root dir must exist: '{}'", root)
    root.resolve()
    LOG.debug("root=%s", root)
    # 11/11/2020 BUGFIX : was collecting files in trash like a cyber racoon
    matches = []
    for entry in root.glob(pattern):
        if entry.is_file() and "$RECYCLE.BIN" not in entry.parts:
            matches.append(entry)
    LOG.debug("\t'%s': Found %s files in %s", pattern, len(matches), root)
    return matches
def make_FS_safe(s: str) -> str:
    """File Systems don't accept all characters on file/directory names.

    Return ``s`` with illegal characters stripped.
    Note: OS/FS agnostic, applies a simple filter on characters:
    ``\\, /, *, ?, :, ", <, >, |``
    """
    return MAKE_FS_SAFE_PATTERN.sub("", s)
def find_available_path(root: Path, base_name: str, file: bool = True) -> Path:
    """Returns a path to a file/directory that DOESN'T already exist.

    The file/dir the user wishes to make a path for is referred as X.

    `root`: where X must be created.
    `base_name`: the base name for X. May be completed with '(index)' if the
    name already exists.
    `file`: True if X is a file, False if it is a directory
    """

    # Helper function: makes suffixes for already existing files/directories
    def suffixes():
        yield ""
        idx = 0
        while True:
            idx += 1
            yield f" ({idx})"

    # Iterate over candidate paths until an unused one is found
    safe_base_name = make_FS_safe(base_name)
    if file:
        # name formatting has to keep the extension at the end of the name !
        ext_idx = safe_base_name.rfind(".")
        if ext_idx == -1:
            # Generalization: a name without an extension no longer triggers
            # an assertion failure; the whole name is used as the stem.
            f_name, f_ext = safe_base_name, ""
        else:
            f_name, f_ext = safe_base_name[:ext_idx], safe_base_name[ext_idx:]
        for suffix in suffixes():
            _object = root / (f_name + suffix + f_ext)
            if not _object.is_file():
                return _object
    else:
        for suffix in suffixes():
            _object = root / (safe_base_name + suffix)
            if not _object.is_dir():
                return _object
    # Unreachable: suffixes() is infinite, but keeps type checkers satisfied.
    raise RuntimeError("Failed to find an available path")
def make_valid_path(
    root: Union[Path, List], base_name: str, file: bool = True, create: bool = False
) -> Path:
    """Returns a path to a file/directory that DOESN'T already exist.
    The file/dir the user wishes to make a path for is referred as X.

    `root`: where X must be created. Can be a list of path parts
    `base_name`: the base name for X. May be completed with '(index)' if name already exists.
    `file`: True if X is a file, False if it is a directory
    `create`: True instantiates X (empty file or dir), False doesn't

    Build upon `find_available_path`, adding:

    - root path construction (List->Path)
    - root mkdir
    - ability to initialize returned file/dir
    """
    # make root path: when a list is given, the first element may be a str or
    # a Path; every following element must be a str and is FS-sanitised.
    if isinstance(root, List):
        if isinstance(root[0], str):
            _root = Path(make_FS_safe(root[0]))
        elif isinstance(root[0], Path):
            _root = root[0]
        else:
            raise TypeError(
                f"root[0]={root[0]} is of unexpected type {type(root[0])}, not str or Path !"
            )
        for path_part in root[1:]:
            assertTrue(
                isinstance(path_part, str),
                "path part in root '{}' is of unexpected type {}, not str !",
                path_part,
                type(path_part),
            )
            safe_path_part = make_FS_safe(path_part)
            assertTrue(
                safe_path_part is not None,
                "make_FS_safe returned None for '{}'",
                path_part,
            )
            _root = _root / safe_path_part
    elif isinstance(root, Path):
        _root = root
    else:
        raise TypeError(
            f"root={root} is of unexpected type {type(root)}, not str or Path !"
        )
    # make root directory
    ensure_dir_exists(_root)
    # Find valid path
    valid_path = find_available_path(_root, base_name, file)
    # Optionally create file/dir
    if create:
        if file:
            valid_path.touch()
        else:
            valid_path.mkdir()
    return valid_path
def ensure_dir_exists(folder: Path) -> None:
    """Tests whether `folder` exists, creates it (and its whole path) if it
    doesn't.

    :param folder: directory path that must exist after this call
    :raises ValueError: if `folder` exists and is a file
    """
    if folder.is_file():
        raise ValueError(f"Given path '{folder}' is a file !")
    # exist_ok removes the check-then-act race between is_dir() and mkdir()
    folder.mkdir(parents=True, exist_ok=True)
def folder_get_file_count(_root: Path, use_fallback: bool = False) -> int:
    """Uses built-in platform-specific ways to recursively count the number of files in a given directory.
    Reason for using CLI calls to platform-specific external tools : they typically offer superior performance (because optimised)

    `use_fallback` : if True, use Path.glob instead of platform-specific CLI calls (mainly for testing puposes)
    """
    _root = _root.resolve()

    def fallback() -> int:
        # Pure-Python count; used when no platform command is available.
        return sum(1 for x in _root.glob("**/*") if x.is_file())

    if use_fallback:
        return fallback()
    _current_os = Os()
    command = None
    # NOTE(review): the path is interpolated into a shell string; paths
    # containing double quotes would break the quoting — confirm acceptable.
    if _current_os.windows:
        # Windows CMD
        LOG.debug("Crawler from '%s'", _root)
        command = f'dir "{_root}" /A:-D /B /S | find "." /C'
    elif _current_os.wsl or _current_os.linux:
        # Linux
        LOG.debug("Crawler from '%s'", _root)
        command = f'find "{_root}" -type f|wc -l'
    else:
        LOG.warning(
            "OS not recognised or has no specific command set (%s); fallback method used.",
            _current_os,
        )
        return fallback()
    return int(popen(command).read().strip())
def folder_get_subdirs(root_dir: Path) -> List[Path]:
    """Return a list of first level subdirectories"""
    assertTrue(root_dir.is_dir(), "Root dir must exist: '{}'", root_dir)
    subdirs = []
    for child in root_dir.resolve().iterdir():
        if child.is_dir() and "$RECYCLE.BIN" not in child.parts:
            subdirs.append(child)
    return subdirs
def windows_list_logical_drives() -> List[Path]:
    """Uses windows-specific methods to retrieve a list of logical drives.
    Both methods have been developped and tested to give equivalent output and be interchangeable

    Warning: Only works on Windows !
    """

    def method1():
        """uses a windows shell command to list drives"""

        def filter_drives(lst):
            # Skip empty entries and anything Path() cannot resolve.
            for item in lst:
                if not item:
                    continue
                try:
                    yield Path(item).resolve()
                except Exception:  # nosec B112
                    continue

        # GetLogicalDriveStrings returns a NUL-separated string of roots.
        drives = list(filter_drives(win32api.GetLogicalDriveStrings().split("\x00")))
        return drives

    def method2():
        """uses a windows shell command to list drives"""
        command = ["wmic", "logicaldisk", "get", "name"]
        stdout = execute(command)["stdout"]

        def return_cleaned(lst):
            # Keep only lines that look like a drive letter ('C:', 'D:', ...).
            for item in lst:
                if len(item) < 2:
                    continue
                if item[0].isupper() and item[1] == ":":
                    try:
                        # Bugfix : the '<driveletter>:' format was resolving to CWD when driveletter==CWD's driveletter.
                        # This seems to be an expected Windows behavior. Fix: switch to '<driveletter>:\\' format, whis is more appropriate.
                        yield Path(item[:2] + "\\").resolve()
                    except Exception:  # nosec B112
                        continue

        drives = list(return_cleaned(stdout.splitlines()))
        return drives

    # Prefer the win32api route; fall back to wmic, and abort on total failure.
    try:
        return method1()
    except Exception:
        try:
            return method2()
        except Exception as e:
            LOG.error("windows_list_logical_drives: something went wrong: %s", e)
            sys.exit(1)
def safe_file_copy(
    file: Path,
    destination_dir: Path,
    file_size: Optional[int] = None,
    rename_if_destination_exists: bool = False,
) -> Tuple[Optional[Path], int]:
    """Copies file to some directory, and returns the destination file path and the amount of bytes copied.
    If target file already exists, nothing is copied (content is not verified for a match).

    Note: Can fail if:

    * source file doesn't exist
    * target file exists and ``rename_if_destination_exists=False``
    * shutils.copy2 fails (out of space error or other).

    `file_size`: File size in bytes. If provided, avoids a call to check the actual file size.
    `rename_if_destination_exists`: Instead of failing if a file with same name already exist in destination
    directory, choose an alternative name instead (add ' (1)' or similar at the end of the name)

    Returns: Tuple (<target_file_path:Path|None>, <copied_bytes:int>)
    """
    assertTrue(
        file is not None and file.is_file(),
        "Argument error: File None or non-existing: '{}'",
        file,
    )
    # Making target path
    target = destination_dir / file.name
    if target.exists():
        if rename_if_destination_exists:
            target = find_available_path(
                root=destination_dir, base_name=file.name, file=True
            )
        else:
            # Existing target without rename permission: report and bail out.
            LOG.warning(
                "Cannot copy %s because target already exist : %s", file.name, target
            )
            return (None, 0)
    # Copy
    LOG.info("Copying %s -> %s", file, target)
    copy2(file, target)
    LOG.info("Copying done !")
    return (target, file_size if file_size else file.stat().st_size)
def replace_file(to_be_replaced: Path, replacing: Path) -> None:
    """Tries to replace a file by another. Sends both ``to_be_replaced`` file
    and original ``replacing`` file to trash.
    """
    assertTrue(
        to_be_replaced.is_file(), "File to be replaced must exist: '{}'", to_be_replaced
    )
    assertTrue(
        replacing.is_file(),
        "File replacing file to be replaced must exist: '{}'",
        replacing,
    )
    # NOTE(review): the old file is trashed *before* the copy; if the copy
    # then fails, neither file remains in place (the original is only in the
    # trash) — confirm this ordering is intended.
    LOG.info("Sending '%s' to trash", to_be_replaced)
    send2trash(to_be_replaced)
    LOG.info("Replacing file %s by file at %s", to_be_replaced, replacing)
    bytes_to_move = replacing.stat().st_size
    _, bytes_copied = safe_file_copy(
        file=replacing, destination_dir=to_be_replaced.parent
    )
    if bytes_copied != bytes_to_move:
        # Incomplete copy: keep the source file instead of trashing it.
        LOG.warning(
            "Something went wrong while copying: bytes_copied=%d != %d=bytes_to_move",
            bytes_copied,
            bytes_to_move,
        )
        return
    LOG.info("Sending '%s' to trash", replacing)
    send2trash(replacing)
def open_folder_in_explorer(directory: Path) -> None:
    """Tries to open a file explorer window to given directory

    Note: as of now, only the windows-specific part was tested,
    not WSL or linux
    """
    directory_s = str(directory.resolve())
    _os = Os()
    if _os.windows or _os.cygwin or _os.wsl:
        # explorer.exe rejects doubled backslashes, hence the replace.
        command = ["explorer.exe", directory_s.replace("\\" * 2, "\\")]
    elif _os.linux:
        command = ["xdg-open", directory_s]
    else:
        LOG.warning("Unsupported OS platform '%s' !", _os)
        return
    LOG.debug("executing command: '%s'", command)
    execute(command)
/KaTrain-1.14.0-py3-none-any.whl/katrain/core/tsumego_frame.py | from katrain.core.game_node import GameNode
from katrain.core.sgf_parser import Move
# tsumego frame ported from lizgoban by kaorahi
# note: coords = (j, i) in katrain
# Coordinates within this distance of a board edge snap to that edge.
near_to_edge = 2
# Presumably the score margin required for the attacker to win; consumed by
# put_outside() (defined elsewhere) — confirm.
offence_to_win = 5
BLACK = "B"
WHITE = "W"
def tsumego_frame_from_katrain_game(game, komi, black_to_play_p, ko_p, margin):
    """Build a tsumego frame around the current katrain position.

    Returns (played_node, katrain_region) where played_node is a new GameNode
    holding the frame stones as AB/AW setup properties and katrain_region is
    the analysis region with axes swapped to katrain's (j, i) order.
    """
    current_node = game.current_node
    # Flatten the chain-indexed board into a plain grid of 'B'/'W'/'-'.
    bw_board = [[game.chains[c][0].player if c >= 0 else "-" for c in line] for line in game.board]
    isize, jsize = ij_sizes(bw_board)
    blacks, whites, analysis_region = tsumego_frame(bw_board, komi, black_to_play_p, ko_p, margin)
    sgf_blacks = katrain_sgf_from_ijs(blacks, isize, jsize, "B")
    sgf_whites = katrain_sgf_from_ijs(whites, isize, jsize, "W")
    played_node = GameNode(parent=current_node, properties={"AB": sgf_blacks, "AW": sgf_whites})  # this inserts
    # note: coords = (j, i) in katrain, hence the axis swap.
    katrain_region = analysis_region and (analysis_region[1], analysis_region[0])
    return (played_node, katrain_region)
def katrain_sgf_from_ijs(ijs, isize, jsize, player):
    # Convert (i, j) coordinates to SGF strings; katrain's Move takes (j, i)
    # (see the coords note at the top of this module). `player` is unused
    # here — the caller decides AB vs AW.
    return [Move((j, i)).sgf((jsize, isize)) for i, j in ijs]
def tsumego_frame(bw_board, komi, black_to_play_p, ko_p, margin):
    """Compute the frame stones for a tsumego position.

    Returns (blacks, whites, analysis_region) where blacks/whites are lists
    of (i, j) coordinates of the added frame stones and analysis_region is
    the bounding box of the marked region (or None/False, see
    get_analysis_region).
    """
    stones = stones_from_bw_board(bw_board)
    filled_stones = tsumego_frame_stones(stones, komi, black_to_play_p, ko_p, margin)
    region_pos = pick_all(filled_stones, "tsumego_frame_region_mark")
    bw = pick_all(filled_stones, "tsumego_frame")
    blacks = [(i, j) for i, j, black in bw if black]
    whites = [(i, j) for i, j, black in bw if not black]
    return (blacks, whites, get_analysis_region(region_pos))
def pick_all(stones, key):
    """Collect [i, j, black] for every grid entry whose `key` value is truthy."""
    found = []
    for i, row in enumerate(stones):
        for j, stone in enumerate(row):
            if stone.get(key):
                found.append([i, j, stone.get("black")])
    return found
def get_analysis_region(region_pos):
    """Bounding box ((imin, imax), (jmin, jmax)) of the marked positions.

    Returns None when there are no positions, and False when the box is
    degenerate (zero width or height) — mirroring the original truthiness
    chain.
    """
    if not region_pos:
        return None
    all_i, all_j, _ = zip(*region_pos)
    i_range = (min(all_i), max(all_i))
    j_range = (min(all_j), max(all_j))
    return i_range[0] < i_range[1] and j_range[0] < j_range[1] and (i_range, j_range)
def tsumego_frame_stones(stones, komi, black_to_play_p, ko_p, margin):
    """Fill the board outside the problem area with framing stones, in place.

    May recurse once after flipping/transposing the grid so the problem ends
    up in the standard position; in that case the filled grid is flipped back
    before being returned.  Returns ``[]`` when the board has no stones.
    """
    sizes = ij_sizes(stones)
    isize, jsize = sizes
    # Every occupied point, with its colour.
    ijs = [
        {"i": i, "j": j, "black": h.get("black")}
        for i, row in enumerate(stones)
        for j, h in enumerate(row)
        if h.get("stone")
    ]
    if len(ijs) == 0:
        return []
    # find range of problem
    top = min_by(ijs, "i", +1)
    left = min_by(ijs, "j", +1)
    bottom = min_by(ijs, "i", -1)
    right = min_by(ijs, "j", -1)
    # Snap near-edge extremes onto the edge so edge contact is preserved.
    imin = snap0(top["i"])
    jmin = snap0(left["j"])
    imax = snapS(bottom["i"], isize)
    jmax = snapS(right["j"], jsize)
    # flip/rotate for standard position
    # don't mix flip and swap (FF = SS = identity, but SFSF != identity)
    flip_spec = (
        [False, False, True] if imin < jmin else [need_flip_p(imin, imax, isize), need_flip_p(jmin, jmax, jsize), False]
    )
    if True in flip_spec:
        flipped = flip_stones(stones, flip_spec)
        filled = tsumego_frame_stones(flipped, komi, black_to_play_p, ko_p, margin)
        return flip_stones(filled, flip_spec)
    # put outside stones
    i0 = imin - margin
    i1 = imax + margin
    j0 = jmin - margin
    j1 = jmax + margin
    frame_range = [i0, i1, j0, j1]
    black_to_attack_p = guess_black_to_attack([top, bottom, left, right], sizes)
    put_border(stones, sizes, frame_range, black_to_attack_p)
    put_outside(stones, sizes, frame_range, black_to_attack_p, black_to_play_p, komi)
    put_ko_threat(stones, sizes, frame_range, black_to_attack_p, black_to_play_p, ko_p)
    return stones
# detect corner/edge/center problems
# (avoid putting border stones on the first lines)
def snap(k, to):
    # Clamp k onto `to` when within `near_to_edge` points of it, so problems
    # that (nearly) touch a board edge keep that edge contact in the frame.
    return to if abs(k - to) <= near_to_edge else k
def snap0(k):
    # Snap towards the low edge (coordinate 0).
    return snap(k, 0)
def snapS(k, size):
    # Snap towards the high edge (coordinate size - 1).
    return snap(k, size - 1)
def min_by(ary, key, sign):
    """Return the element of *ary* minimizing ``sign * element[key]``.

    The first minimal element wins on ties, matching list.index semantics.
    """
    return min(ary, key=lambda z: sign * z[key])
def need_flip_p(kmin, kmax, size):
    # True when the problem sits closer to the low edge than the high edge
    # along this axis, i.e. a mirror flip is needed for the standard position.
    return kmin < size - kmax - 1
def guess_black_to_attack(extrema, sizes):
    """Guess whether black is the attacker.

    Each extreme stone of the problem contributes its board-centrality weight,
    signed by colour; a positive total means black dominates.
    """
    total = 0
    for z in extrema:
        total += sign_of_color(z) * height2(z, sizes)
    return total > 0
def sign_of_color(z):
    """+1 for a black stone record, -1 otherwise."""
    if z["black"]:
        return 1
    return -1
def height2(z, sizes):
    # Combined centrality weight of stone `z` along both axes.
    isize, jsize = sizes
    return height(z["i"], isize) + height(z["j"], jsize)
def height(k, size):
    # Centrality weight of coordinate k: maximal (== size) at the board
    # centre, decreasing linearly towards the edges.
    return size - abs(k - (size - 1) / 2)
######################################
# sub
def put_border(stones, sizes, frame_range, is_black):
    # Draw the rectangular border of `frame_range`:
    i0, i1, j0, j1 = frame_range
    # two vertical edges (j fixed at j0 and j1) ...
    put_twin(stones, sizes, i0, i1, j0, j1, is_black, False)
    # ... then two horizontal edges (i fixed at i0 and i1).
    put_twin(stones, sizes, j0, j1, i0, i1, is_black, True)
def put_twin(stones, sizes, beg, end, at0, at1, is_black, reverse_p):
    """Place two parallel lines of border stones.

    For each fixed coordinate in (at0, at1) a run of stones is laid from
    *beg* to *end* along the other axis; *reverse_p* selects which axis is
    the fixed one.
    """
    for fixed in (at0, at1):
        for moving in range(beg, end + 1):
            if reverse_p:
                put_stone(stones, sizes, fixed, moving, is_black, False, True)
            else:
                put_stone(stones, sizes, moving, fixed, is_black, False, True)
def put_outside(stones, sizes, frame_range, black_to_attack_p, black_to_play_p, komi):
    """Fill every point outside *frame_range* with wall stones, in place.

    The outside area is split so the defender receives roughly half the board
    minus komi and the ``offence_to_win`` margin; a sparse checkerboard of
    empty points is left away from the colour boundary.
    (``black_to_play_p`` is currently unused here.)
    """
    isize, jsize = sizes
    count = 0
    # Komi as seen from the attacker's side.
    offense_komi = (+1 if black_to_attack_p else -1) * komi
    defense_area = (isize * jsize - offense_komi - offence_to_win) / 2
    for i in range(isize):
        for j in range(jsize):
            if inside_p(i, j, frame_range):
                continue
            count += 1
            # The first `defense_area` outside points get the defender colour.
            black_p = xor(black_to_attack_p, (count <= defense_area))
            # Leave empties on a checkerboard, but not near the colour boundary.
            empty_p = (i + j) % 2 == 0 and abs(count - defense_area) > isize
            put_stone(stones, sizes, i, j, black_p, empty_p)
# standard position:
# ? = problem, X = offense, O = defense
# OOOOOOOOOOOOO
# OOOOOOOOOOOOO
# OOOOOOOOOOOOO
# XXXXXXXXXXXXX
# XXXXXXXXXXXXX
# XXXX.........
# XXXX.XXXXXXXX
# XXXX.X???????
# XXXX.X???????
# (pattern, top_p, left_p)
offense_ko_threat = (
"""
....OOOX.
.....XXXX
""",
True,
False,
)
defense_ko_threat = (
"""
..
..
X.
XO
OO
.O
""",
False,
True,
)
def put_ko_threat(stones, sizes, frame_range, black_to_attack_p, black_to_play_p, ko_p):
    """Stamp a ko-threat pattern into a corner of the filled frame, in place.

    Whether the offense or defense pattern is used depends on the parity of
    *ko_p*, *black_to_attack_p* and *black_to_play_p*.  If any pattern cell
    would fall inside *frame_range*, no threat is placed at all.
    """
    isize, jsize = sizes
    for_offense_p = xor(ko_p, xor(black_to_attack_p, black_to_play_p))
    pattern, top_p, left_p = offense_ko_threat if for_offense_p else defense_ko_threat
    aa = [list(line) for line in pattern.splitlines() if len(line) > 0]
    # NOTE: this local `height` shadows the module-level height() function.
    height, width = ij_sizes(aa)
    for i, row in enumerate(aa):
        for j, ch in enumerate(row):
            # Anchor the pattern at the corner selected by top_p/left_p.
            ai = i + (0 if top_p else isize - height)
            aj = j + (0 if left_p else jsize - width)
            if inside_p(ai, aj, frame_range):
                return
            # 'O' cells take the defender colour, other letters the attacker;
            # '.' cells are cleared.
            black = xor(black_to_attack_p, ch == "O")
            empty = ch == "."
            put_stone(stones, sizes, ai, aj, black, empty)
def xor(a, b):
    """Logical exclusive-or of the truthiness of *a* and *b*."""
    return (not a) != (not b)
######################################
# util
def flip_stones(stones, flip_spec):
    """Return a new grid with *stones* mirrored/transposed per *flip_spec*.

    *flip_spec* is (flip_i, flip_j, swap_ij); swapping transposes the grid
    shape.  Cell dicts are shared, not copied.
    """
    sizes = ij_sizes(stones)
    isize, jsize = sizes
    if flip_spec[2]:
        new_shape = (jsize, isize)
    else:
        new_shape = (isize, jsize)
    flipped = [[None] * new_shape[1] for _ in range(new_shape[0])]
    for i, row in enumerate(stones):
        for j, cell in enumerate(row):
            ni, nj = flip_ij((i, j), sizes, flip_spec)
            flipped[ni][nj] = cell
    return flipped
def put_stone(stones, sizes, i, j, black, empty, tsumego_frame_region_mark=False):
    """Write a frame stone (or an empty cell) at (i, j), in place.

    Coordinates outside the board are silently ignored.
    """
    isize, jsize = sizes
    if not (0 <= i < isize and 0 <= j < jsize):
        return
    if empty:
        stones[i][j] = {}
    else:
        stones[i][j] = {
            "stone": True,
            "tsumego_frame": True,
            "black": black,
            "tsumego_frame_region_mark": tsumego_frame_region_mark,
        }
def inside_p(i, j, region):
    """Return True when (i, j) lies inside the inclusive bounding box *region*.

    :param region: sequence ``[i0, i1, j0, j1]`` of inclusive bounds.
    """
    i0, i1, j0, j1 = region
    # Chained comparisons read as the mathematical interval test and replace
    # the verbose `i0 <= i and i <= i1` form.
    return i0 <= i <= i1 and j0 <= j <= j1
def stones_from_bw_board(bw_board):
    """Convert a grid of 'B'/'W'/'-' strings into a grid of stone dicts."""
    converted = []
    for row in bw_board:
        converted.append([stone_from_str(s) for s in row])
    return converted
def stone_from_str(s):
    """Translate a board cell string ('B', 'W' or anything else) into a stone dict."""
    if s == BLACK or s == WHITE:
        return {"stone": True, "black": s == BLACK}
    return {}
def ij_sizes(stones):
    # (rows, cols) of a grid; assumes a non-empty, rectangular nested list.
    return (len(stones), len(stones[0]))
def flip_ij(ij, sizes, flip_spec):
    """Map a coordinate pair through the mirror/transpose in *flip_spec*."""
    i, j = ij
    isize, jsize = sizes
    flip_i, flip_j, swap_ij = flip_spec
    new_i = flip1(i, isize, flip_i)
    new_j = flip1(j, jsize, flip_j)
    if swap_ij:
        return (new_j, new_i)
    return (new_i, new_j)
def flip1(k, size, flag):
    """Mirror index *k* within *size* when *flag* is set."""
    if flag:
        return size - 1 - k
    return k
/Komoe-0.4.0-py3-none-any.whl/komoe/commands.py | import click
import os
from pathlib import Path
import traceback
import watchfiles
from . import template, __version__
from .config import ProjectConfig
from .builder import Builder
from . import log
@click.group()
def main():
    # Root click command group; subcommands (`new`, `build`) attach to it via
    # @main.command().  No docstring on purpose: click would show it as help.
    pass
@main.command()
@click.argument(
    "path",
    default=".",
    type=click.Path(file_okay=False, path_type=Path),
)
@click.option(
    "--project-name", "-N", prompt=True, required=True, help="The name of the project"
)
def new(path, project_name):
    """Creates a new project

    If PATH isn't specified, the current directory is used.
    """
    if path.exists():
        # Refuse to scaffold into a non-empty directory; version-control
        # metadata (.git*) is tolerated so running `git init` first is fine.
        entries = [
            entry for entry in path.iterdir() if not entry.name.startswith(".git")
        ]
        if len(entries) != 0:
            path_repr = str(path)
            if path_repr == ".":
                path_repr = "the current directory"
            raise click.ClickException(f"{path_repr} isn't empty")
    else:
        os.makedirs(path)

    template.create_new_project(path, project_name)
@main.command()
@click.option(
    "--project-file",
    "-p",
    type=click.Path(dir_okay=False, exists=True, path_type=Path),
    help="Build a specific project file (overrides --project-dir)",
)
@click.option(
    "--project-dir",
    "-P",
    type=click.Path(file_okay=False, exists=True, path_type=Path),
    help="Build the project in that directory",
)
@click.option("--fresh", is_flag=True, help="Regenerates all content")
@click.option("--watch", is_flag=True, help="Rebuild as files change")
def build(project_file, project_dir, fresh, watch):
    """Build a project

    If no project is specified, the project in the current directory will be built.
    """
    # Locate the project file: explicit file > project dir > current directory.
    if project_file is not None:
        config_path = project_file
    elif project_dir is not None and (project_dir / "komoe.toml").is_file():
        # Fix: the original tested a bare generator expression here, which is
        # always truthy, so --project-dir without a komoe.toml was accepted
        # and failed later with a confusing error.
        config_path = project_dir / "komoe.toml"
    elif (Path.cwd() / "komoe.toml").is_file():
        config_path = Path.cwd() / "komoe.toml"
    else:
        raise click.ClickException("project file not found")

    config = load_config(config_path)
    builder = Builder(config, config_path.parent, fresh=fresh)
    builder.build()

    if watch:
        while True:
            try:
                click.echo("Waiting for a file to change ...")
                for changes in watchfiles.watch(config_path.parent):
                    need_rebuild = False
                    force_fresh = False

                    for _, file in changes:
                        path = Path(file)
                        # Ignore anything written by the build itself.
                        if (not path.is_relative_to(builder.output_dir)) and (
                            not path.is_relative_to(builder.cache_dir)
                        ):
                            # source files
                            if any(
                                path.is_relative_to(srcdir)
                                for srcdir in builder.snapshot_dirs
                            ):
                                need_rebuild = True
                            # project file and plugins
                            elif path.name == "komoe.toml" or path.suffix == ".py":
                                need_rebuild = True
                                force_fresh = True

                    if need_rebuild:
                        if force_fresh:
                            log.info("The project file or a plugin changed")
                            # Fix: reload the configuration so edits to
                            # komoe.toml take effect without a restart.
                            config = load_config(config_path)
                        builder = Builder(
                            config, config_path.parent, fresh=fresh or force_fresh
                        )
                        builder.build()
                        click.echo("Waiting for a file to change ...")
            except KeyboardInterrupt:
                click.echo("\nWatch stopped")  # fix: "stoppped" typo
                break
            except Exception as e:
                # Keep watching after a failed rebuild; show the traceback dimmed.
                click.secho(
                    "".join(traceback.format_tb(e.__traceback__)), nl=False, dim=True
                )
                log.error(f"{type(e).__name__}: {e}")
def load_config(path):
    """Load and validate the project configuration at *path*.

    Raises click.ClickException when the installed Komoe version is older
    than the project's ``minimum_required_version``.
    """
    config = ProjectConfig.from_file(path)
    # NOTE(review): if these are plain strings, the comparison is
    # lexicographic and misorders e.g. "0.10.0" vs "0.9.0" — confirm whether
    # minimum_required_version / __version__ are parsed version objects.
    if config.minimum_required_version > __version__:
        raise click.ClickException(
            f"The project requires at least Komoe v{config.minimum_required_version}"
        )
    return config
/CaseRecommender-1.1.1.tar.gz/CaseRecommender-1.1.1/caserec/recommenders/rating_prediction/svd.py | # © 2019. Case Recommender (MIT License)
import numpy as np
from scipy.sparse.linalg import svds
from caserec.recommenders.rating_prediction.base_rating_prediction import BaseRatingPrediction
from caserec.utils.extra_functions import timed
__author__ = 'Arthur Fortes <fortes.arthur@gmail.com>'
class SVD(BaseRatingPrediction):
    def __init__(self, train_file=None, test_file=None, output_file=None, factors=10, sep='\t', output_sep='\t',
                 random_seed=None):
        """
        Matrix Factorization for rating prediction

        Matrix factorization models map both users and items to a joint latent factor space of dimensionality f,
        such that user-item interactions are modeled as inner products in that space.

        Usage::

            >> MatrixFactorization(train, test).compute()

        :param train_file: File which contains the train set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type train_file: str

        :param test_file: File which contains the test set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type test_file: str, default None

        :param output_file: File with dir to write the final predictions
        :type output_file: str, default None

        :param factors: Number of latent factors per user/item
        :type factors: int, default 10

        :param sep: Delimiter for input files
        :type sep: str, default '\t'

        :param output_sep: Delimiter for output file
        :type output_sep: str, default '\t'

        :param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
        :type random_seed: int, default None
        """
        super(SVD, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file, sep=sep,
                                  output_sep=output_sep)

        self.recommender_name = 'SVD'
        self.factors = factors

        if random_seed is not None:
            np.random.seed(random_seed)

        # internal vars
        self.feedback_triples = None
        self.prediction_matrix = None

    def init_model(self):
        """
        Method to treat and initialize the model
        """
        self.feedback_triples = []

        # Map each (user, item, rating) interaction onto internal integer ids.
        for user in self.train_set['feedback']:
            for item in self.train_set['feedback'][user]:
                self.feedback_triples.append((self.user_to_user_id[user], self.item_to_item_id[item],
                                              self.train_set['feedback'][user][item]))

        self.create_matrix()

    def fit(self):
        """
        This method performs Singular Value Decomposition over the training data.
        """
        u, s, vt = svds(self.matrix, k=self.factors)
        s_diagonal_matrix = np.diag(s)
        # Reconstruct the low-rank rating matrix: U * diag(S) * Vt.
        self.prediction_matrix = np.dot(np.dot(u, s_diagonal_matrix), vt)

    def predict_score(self, u, i, cond=True):
        """
        Method to predict a single score for a pair (user, item)

        :param u: User ID
        :type u: int

        :param i: Item ID
        :type i: int

        :param cond: Use max and min values of train set to limit score
        :type cond: bool, default True

        :return: Score generate for pair (user, item)
        :rtype: float
        """
        rui = self.train_set["mean_value"] + self.prediction_matrix[u][i]

        if cond:
            # Clamp the prediction to the observed rating range.
            if rui > self.train_set["max_value"]:
                rui = self.train_set["max_value"]
            elif rui < self.train_set["min_value"]:
                rui = self.train_set["min_value"]

        return rui

    def predict(self):
        """
        This method computes a final rating for unknown pairs (user, item)
        """
        if self.test_file is not None:
            for user in self.test_set['users']:
                for item in self.test_set['feedback'][user]:
                    self.predictions.append((user, item, self.predict_score(self.user_to_user_id[user],
                                                                            self.item_to_item_id[item], True)))
        else:
            # Fix: ``raise NotImplemented`` raises a TypeError, because
            # NotImplemented is a constant, not an exception class.
            raise NotImplementedError("SVD.predict() requires a test file")

    def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
        """
        Extends compute method from BaseRatingPrediction. Method to run recommender algorithm

        :param verbose: Print recommender and database information
        :type verbose: bool, default True

        :param metrics: List of evaluation measures
        :type metrics: list, default None

        :param verbose_evaluation: Print the evaluation results
        :type verbose_evaluation: bool, default True

        :param as_table: Print the evaluation results as table
        :type as_table: bool, default False

        :param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
        :type table_sep: str, default '\t'
        """
        super(SVD, self).compute(verbose=verbose)

        if verbose:
            self.init_model()
            print("training_time:: %4f sec" % timed(self.fit))
            if self.extra_info_header is not None:
                print(self.extra_info_header)
            print("prediction_time:: %4f sec" % timed(self.predict))
            print('\n')
        else:
            # Execute all in silence without prints
            self.init_model()
            self.fit()
            self.predict()

        self.write_predictions()

        if self.test_file is not None:
            self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep)
/Beetle_Preview-0.1.0-py3-none-any.whl/beetle_preview/__init__.py | from beetle.renderers import MissingRendererError
from http import server
from socketserver import TCPServer
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
from watchdog.events import FileSystemEventHandler
from hashlib import md5
import os
class Updater(FileSystemEventHandler):
    """Re-renders the site on any filesystem event.

    Only files whose rendered content changed (tracked by an md5 digest per
    destination) are rewritten, so unchanged pages are not touched.
    """

    def __init__(self, serve_directory, writer):
        # Captured before the HTTP server chdirs into the output directory.
        self.basepath = os.getcwd()
        self.directory = serve_directory
        self.writer = writer
        # destination -> md5 digest of the last written content.
        self.cache = {}

    def on_any_event(self, event):
        # The preview server serves from inside the output directory, so we
        # temporarily switch back to the project root to render.
        # Fix: the original used os.chdir('..'), which breaks when the output
        # folder is nested more than one level deep; use the recorded basepath
        # and restore the serving cwd even if rendering raises.
        os.chdir(self.basepath)
        try:
            for destination, content in self.writer.files():
                digest = md5(content).hexdigest()
                abs_path = os.path.join(
                    self.basepath,
                    self.directory,
                    destination,
                )
                try:
                    if destination not in self.cache:
                        self.writer.write_file(abs_path, content)
                        self.cache[destination] = digest
                        print('written', destination)
                    elif self.cache[destination] != digest:
                        self.writer.write_file(abs_path, content)
                        self.cache[destination] = digest
                        print('updated', destination)
                except MissingRendererError:
                    print('could not render:{}'.format(destination))
        finally:
            os.chdir(os.path.join(self.basepath, self.directory))
class Server:
    """Serves the rendered site over HTTP while watching source folders."""

    def __init__(self, folders, output_folder, port, updater):
        self.directory = output_folder
        self.folders = folders
        self.port = port
        self.updater = updater

    def monitor(self):
        """Start a watchdog observer on every watched source folder."""
        observer = Observer()
        for folder in self.folders:
            observer.schedule(self.updater, folder, recursive=True)
        observer.start()

    def serve(self):
        """Serve the output directory until interrupted with Ctrl-C."""
        self.monitor()
        os.chdir(self.directory)
        request_handler = server.SimpleHTTPRequestHandler
        # Fix: bind manually so SO_REUSEADDR can be enabled first; without it
        # a restart within TIME_WAIT fails with "Address already in use".
        httpd = TCPServer(('', self.port), request_handler, bind_and_activate=False)
        httpd.allow_reuse_address = True
        httpd.server_bind()
        httpd.server_activate()
        try:
            print('Preview available at http://0.0.0.0:{}/'.format(self.port))
            httpd.serve_forever()
        except KeyboardInterrupt:
            httpd.shutdown()
        finally:
            # Fix: release the listening socket; shutdown() alone leaves it open.
            httpd.server_close()
def register(ctx, config):
    """Register the `preview` command on the Komoe plugin context.

    :param ctx: plugin context exposing ``config``, ``writer`` and ``commander``.
    :param config: plugin-specific configuration mapping; supports ``port``
        (defaults to 5000).
    """
    # Folders to watch for changes; the output folder itself is not watched.
    folders = [
        ctx.config.folders['content'],
        ctx.config.folders['include'],
        ctx.config.folders['templates'],
    ]
    output_folder = ctx.config.folders['output']
    port = config.get('port', 5000)

    updater = Updater(ctx.config.folders['output'], ctx.writer)
    server = Server(folders, output_folder, port, updater)

    ctx.commander.add('preview', server.serve, 'Serve the rendered site')
/Cuckoo-2.0.7a1.tar.gz/Cuckoo-2.0.7a1/cuckoo/processing/apkinfo.py |
import hashlib
import logging
import os
import zipfile
from cuckoo.common.objects import File
from cuckoo.common.abstracts import Processing
from cuckoo.common.exceptions import CuckooProcessingError
log = logging.getLogger(__name__)
class ApkInfo(Processing):
    """Static android information about analysis session."""

    def check_size(self, file_list):
        """Decide whether the APK's classes.dex may be decompiled.

        @param file_list: file entries as returned by `_apk_files`.
        @return: True when a classes.dex entry exists and is below the
            optional ``decompilation_threshold`` option (always True when no
            threshold is configured); False otherwise.
        """
        for entry in file_list:
            if "classes.dex" not in entry["name"]:
                continue
            # The first classes.dex found decides the outcome.
            if "decompilation_threshold" in self.options:
                return entry["size"] < self.options.decompilation_threshold
            return True
        # No classes.dex at all: nothing to decompile.
        return False

    def _apk_files(self, apk):
        """Returns a list of files in the APK.

        @param apk: androguard APK object.
        @return: list of dicts with name, md5, size and detected file type.
        """
        ret = []
        for fname, filetype in apk.get_files_types().items():
            buf = apk.zip.read(fname)
            ret.append({
                "name": fname,
                "md5": hashlib.md5(buf).hexdigest(),
                "size": len(buf),
                "type": filetype,
            })
        return ret

    def run(self):
        """Run androguard to extract static android information
        @return: list of static features
        """
        self.key = "apkinfo"
        apkinfo = {}

        if "file" not in self.task["category"]:
            return

        # Imported lazily so the module loads even without androguard.
        from androguard.core.bytecodes.apk import APK
        from androguard.core.bytecodes.dvm import DalvikVMFormat
        from androguard.core.analysis.analysis import uVMAnalysis
        from androguard.core.analysis import analysis

        f = File(self.task["target"])
        if f.get_name().endswith((".zip", ".apk")) or "zip" in f.get_type():
            if not os.path.exists(self.file_path):
                raise CuckooProcessingError("Sample file doesn't exist: \"%s\"" % self.file_path)

            try:
                a = APK(self.file_path)
                if a.is_valid_APK():
                    manifest = {}

                    apkinfo["files"] = self._apk_files(a)
                    manifest["package"] = a.get_package()
                    manifest["main_activity"] = a.get_main_activity()
                    manifest["activities"] = a.get_activities()
                    manifest["services"] = a.get_services()
                    manifest["receivers"] = a.get_receivers()
                    manifest["providers"] = a.get_providers()
                    manifest["libraries"] = a.get_libraries()
                    apkinfo["manifest"] = manifest

                    static_calls = {}
                    # Only decompile small dex files to keep processing fast.
                    if self.check_size(apkinfo["files"]):
                        vm = DalvikVMFormat(a.get_dex())
                        vmx = uVMAnalysis(vm)

                        static_calls["all_methods"] = self.get_methods(vmx)
                        static_calls["is_native_code"] = analysis.is_native_code(vmx)
                        static_calls["is_dynamic_code"] = analysis.is_dyn_code(vmx)
                        static_calls["is_reflection_code"] = analysis.is_reflection_code(vmx)
                    else:
                        log.warning("Dex size bigger than: %s",
                                    self.options.decompilation_threshold)

                    apkinfo["static_method_calls"] = static_calls
            except (IOError, OSError, zipfile.BadZipfile) as e:
                raise CuckooProcessingError("Error opening file %s" % e)

        return apkinfo

    def get_methods(self, vmx):
        """Extract tagged methods (class, name, params, return type) from the
        Dalvik analysis object."""
        methods = []
        for i in vmx.get_methods():
            method = {}
            i.create_tags()
            if not i.tags.empty():
                # Prototype looks like "(params)return"; strip the noise.
                proto = i.method.proto.replace("(", "").replace(";", "")
                protos = proto.split(")")
                params = protos[0].split(" ")
                method["class"] = i.method.get_class_name().replace(";", "")
                method["name"] = i.method.name
                if params and params[0]:
                    method["params"] = params
                method["return"] = protos[1]
                methods.append(method)
        return methods
/Nano-CAT-0.7.2.tar.gz/Nano-CAT-0.7.2/nanoCAT/ff/cp2k_utils.py | from scm.plams import Molecule, Settings, Atom
__all__ = ['set_cp2k_element', 'set_cp2k_param']
def set_cp2k_element(settings: Settings, mol: Molecule) -> None:
    """Set the FORCE_EVAL/SUBSYS/KIND/ELEMENT_ keyword(s) in CP2K job settings.

    Performs an inplace update of the input.force_eval.subsys key in **settings**,
    adding one ``KIND`` section per distinct atom type found in **mol**.

    .. _ELEMENT: https://manual.cp2k.org/trunk/CP2K_INPUT/FORCE_EVAL/SUBSYS/KIND.html#ELEMENT

    Parameters
    ----------
    settings : |plams.Settings|_
        CP2K settings.

    mol : |plams.Molecule|_
        A PLAMS molecule whose atoms possess the
        :attr:`Atom.properties` ``["atom_type"]`` attribute.

    """
    type_to_symbol = {}
    for atom in mol:
        # Fall back to the chemical symbol when no (truthy) custom symbol is set.
        atom_type = atom.properties.get('symbol', atom.symbol) or atom.symbol
        type_to_symbol[atom_type] = atom.symbol

    subsys_settings = settings.input.force_eval.subsys
    for atom_type, symbol in type_to_symbol.items():
        subsys_settings[f'kind {atom_type}'] = {'element': symbol}
def set_cp2k_param(settings: Settings, param_dict: dict) -> None:
    """Apply forcefield parameter blocks from *param_dict* to CP2K *settings*.

    Each value in *param_dict* is a block containing a ``'keys'`` path into
    *settings*, an optional ``'unit'`` string and per-atom(-pair) parameter
    values.  *settings* is updated in place.
    """
    for block_name, block in param_dict.items():
        # Create a to-be formatted string with user-specified units
        unit = f'[{block.unit}]' + ' {}' if 'unit' in block else '{}'

        # Get the to-be update list of settings
        s = settings.get_nested(block['keys'])
        if not isinstance(s, list):
            # The path does not point at a list yet; create one in place.
            _s = settings.get_nested(block['keys'][:-1])
            s = _s[block['keys'][-1]] = []

        for k, v in block.items():
            if k in ('keys', 'unit'):  # Skip
                continue

            value = unit.format(v)
            # Keys with multiple tokens describe atom pairs ('atoms'),
            # single tokens a lone atom ('atom').
            atom = 'atoms' if len(k.split()) > 1 else 'atom'
            atom_list = [i[atom] for i in s]

            try:  # Intersecting set
                idx = atom_list.index(k)
                s[idx].update({block_name: value})
            except ValueError:  # Disjoint set
                new_block = Settings({atom: k, block_name: value})
                s.append(new_block)
/Chubasquero-1.0.0.tar.gz/Chubasquero-1.0.0/chubasquero/static/post.js |
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
'use strict';
/**
* A particular translation for a post.
* @class
*/
class Translation {
constructor(jsonObject) {
const thereisJsonObject = typeof jsonObject === 'object';
this.translation = thereisJsonObject
? jsonObject.meta.translation
: true;
this.content = thereisJsonObject ? jsonObject.content : '';
}
}
/**
 * This is a post.
 * @class
 *
 * This uses the global constant DEFAULT_LANG
 *
 * @todo Title is inside content. Is better use the slug.
 */
class Post {
    constructor(jsonObject) {
        const thereisJsonObject = typeof jsonObject === 'object';
        this.meta = {
            slug: thereisJsonObject ? jsonObject.meta.slug : '',
            date: thereisJsonObject ? jsonObject.meta.date : Post._get_now(),
            modified: thereisJsonObject ? jsonObject.meta.modified : null,
            tags: thereisJsonObject ? jsonObject.meta.tags : '',
            category: thereisJsonObject ? jsonObject.meta.category : '',
            lang: thereisJsonObject ? jsonObject.meta.lang : DEFAULT_LANG,
            authors: [],
            //summary: '',
        };
        this.isPage = thereisJsonObject ? jsonObject.isPage : false;
        this.content = thereisJsonObject ? jsonObject.content : '';
        // Plain object (not a Map) so Vue's reactivity can observe it.
        this.translations = {};
        if (thereisJsonObject) {
            for (const lang in jsonObject.translations) {
                this.translations[lang] = new Translation(jsonObject.translations[lang]);
            }
        }
    }

    /**
     * @param {string} translation Identifier of the translations
     * @returns {Translation|undefined} the stored translation, if any
     */
    getContentByTranslation (translation) {
        return this.translations[translation];
    }

    /**
     * Add a new, empty translation to this post.
     *
     * Fix: `translations` is a plain object, so the previous `.set()` call
     * (a Map method) threw a TypeError at runtime.
     *
     * @param {string} translation Identifier for this translation
     */
    addTranslation (translation) {
        this.translations[translation] = new Translation();
    }

    /**
     * Mark this post as modified now.
     */
    modify(){
        this.meta.modified = Post._get_now();
    }

    get tags () {
        // Stored comma-separated; exposed space-separated.
        return this.meta.tags.replace(/, /g, ' ');
    }

    /**
     * @param {string} tags_string List of tags as a space-separated string
     */
    set tags (tags_string) {
        this.meta.tags = tags_string.replace(/ /g, ', ');
    }

    static _get_now () {
        const now = new Date();
        return `${now.getFullYear()}-${now.getMonth() + 1}-${now.getDate()} ${now.getHours()}:${now.getMinutes()}`;
    }
}
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Platform/__init__.py |
__revision__ = "src/engine/SCons/Platform/__init__.py 2014/07/05 09:42:21 garyo"
import SCons.compat
import imp
import os
import sys
import tempfile
import SCons.Errors
import SCons.Subst
import SCons.Tool
def platform_default():
    """Return the platform string for our execution environment.

    The returned value should map to one of the SCons/Platform/*.py
    files.  Since we're architecture independent, though, we don't
    care about the machine architecture.
    """
    osname = os.name
    if osname == 'java':
        osname = os._osType
    if osname == 'posix':
        if sys.platform == 'cygwin':
            return 'cygwin'
        # Map sys.platform substrings to canonical SCons platform names.
        for needle, canonical in (
            ('irix', 'irix'),
            ('sunos', 'sunos'),
            ('hp-ux', 'hpux'),
            ('aix', 'aix'),
            ('darwin', 'darwin'),
        ):
            if needle in sys.platform:
                return canonical
        return 'posix'
    if os.name == 'os2':
        return 'os2'
    return sys.platform
def platform_module(name = platform_default()):
    """Return the imported module for the platform.

    This looks for a module name that matches the specified argument.
    If the name is unspecified, we fetch the appropriate default for
    our execution environment.

    NOTE(review): this uses the deprecated ``imp`` module (removed in
    Python 3.12); confirm the supported interpreter range before upgrading.
    """
    full_name = 'SCons.Platform.' + name
    if full_name not in sys.modules:
        if os.name == 'java':
            # On Jython, referencing the dotted name is enough to import it.
            eval(full_name)
        else:
            try:
                file, path, desc = imp.find_module(name,
                                        sys.modules['SCons.Platform'].__path__)
                try:
                    mod = imp.load_module(full_name, file, path, desc)
                finally:
                    if file:
                        file.close()
            except ImportError:
                # Fall back to zipimport when SCons is shipped inside a zip.
                try:
                    import zipimport
                    importer = zipimport.zipimporter( sys.modules['SCons.Platform'].__path__[0] )
                    mod = importer.load_module(full_name)
                except ImportError:
                    raise SCons.Errors.UserError("No platform named '%s'" % name)
            setattr(SCons.Platform, name, mod)
    return sys.modules[full_name]
def DefaultToolList(platform, env):
    """Select a default tool list for the specified platform.

    Delegates to SCons.Tool, which owns the platform-to-tools mapping.
    """
    return SCons.Tool.tool_list(platform, env)
class PlatformSpec(object):
    """Pairs a platform name with the generate() callable from its module.

    Instances are callable (delegating to *generate*) and stringify to the
    platform name.
    """

    def __init__(self, name, generate):
        self.name = name
        self.generate = generate

    def __call__(self, *args, **kw):
        return self.generate(*args, **kw)

    def __str__(self):
        return self.name

    def __repr__(self):
        # Added for debuggability; purely additive for existing callers.
        return '%s(%r)' % (type(self).__name__, self.name)
class TempFileMunge(object):
    """A callable class. You can set an Environment variable to this,
    then call it with a string argument, then it will perform temporary
    file substitution on it.  This is used to circumvent the long command
    line limitation.

    Example usage:
    env["TEMPFILE"] = TempFileMunge
    env["LINKCOM"] = "${TEMPFILE('$LINK $TARGET $SOURCES')}"

    By default, the name of the temporary file used begins with a
    prefix of '@'.  This may be configured for other tool chains by
    setting '$TEMPFILEPREFIX'.

    env["TEMPFILEPREFIX"] = '-@'        # diab compiler
    env["TEMPFILEPREFIX"] = '-via'      # arm tool chain
    """
    def __init__(self, cmd):
        # The raw (unsubstituted) command-line template string.
        self.cmd = cmd

    def __call__(self, target, source, env, for_signature):
        if for_signature:
            # If we're being called for signature calculation, it's
            # because we're being called by the string expansion in
            # Subst.py, which has the logic to strip any $( $) that
            # may be in the command line we squirreled away.  So we
            # just return the raw command line and let the upper
            # string substitution layers do their thing.
            return self.cmd
        # Now we're actually being called because someone is actually
        # going to try to execute the command, so we have to do our
        # own expansion.
        cmd = env.subst_list(self.cmd, SCons.Subst.SUBST_CMD, target, source)[0]
        try:
            maxline = int(env.subst('$MAXLINELENGTH'))
        except ValueError:
            maxline = 2048
        length = 0
        for c in cmd:
            length += len(c)
        if length <= maxline:
            # Short enough: no temp file needed, run the command as-is.
            return self.cmd
        # We do a normpath because mktemp() has what appears to be
        # a bug in Windows that will use a forward slash as a path
        # delimiter.  Windows's link mistakes that for a command line
        # switch and barfs.
        #
        # We use the .lnk suffix for the benefit of the Phar Lap
        # linkloc linker, which likes to append an .lnk suffix if
        # none is given.
        (fd, tmp) = tempfile.mkstemp('.lnk', text=True)
        # NOTE(review): SCons.Util and SCons.Action are not imported at the
        # top of this module; they appear to resolve only because sibling
        # imports populate the SCons package — confirm before refactoring.
        native_tmp = SCons.Util.get_native_path(os.path.normpath(tmp))
        if env['SHELL'] and env['SHELL'] == 'sh':
            # The sh shell will try to escape the backslashes in the
            # path, so unescape them.
            native_tmp = native_tmp.replace('\\', r'\\\\')
            # In Cygwin, we want to use rm to delete the temporary
            # file, because del does not exist in the sh shell.
            rm = env.Detect('rm') or 'del'
        else:
            # Don't use 'rm' if the shell is not sh, because rm won't
            # work with the Windows shells (cmd.exe or command.com) or
            # Windows path names.
            rm = 'del'
        prefix = env.subst('$TEMPFILEPREFIX')
        if not prefix:
            prefix = '@'
        args = list(map(SCons.Subst.quote_spaces, cmd[1:]))
        os.write(fd, "\n".join(args) + "\n")
        os.close(fd)
        # XXX Using the SCons.Action.print_actions value directly
        # like this is bogus, but expedient.  This class should
        # really be rewritten as an Action that defines the
        # __call__() and strfunction() methods and lets the
        # normal action-execution logic handle whether or not to
        # print/execute the action.  The problem, though, is all
        # of that is decided before we execute this method as
        # part of expanding the $TEMPFILE construction variable.
        # Consequently, refactoring this will have to wait until
        # we get more flexible with allowing Actions to exist
        # independently and get strung together arbitrarily like
        # Ant tasks.  In the meantime, it's going to be more
        # user-friendly to not let obsession with architectural
        # purity get in the way of just being helpful, so we'll
        # reach into SCons.Action directly.
        if SCons.Action.print_actions:
            print("Using tempfile "+native_tmp+" for command line:\n"+
                  str(cmd[0]) + " " + " ".join(args))
        # NOTE(review): the temp file is deleted by the appended rm/del
        # command, not by Python — confirm the trailing native_tmp element's
        # role in the command-execution layer.
        return [ cmd[0], prefix + native_tmp + '\n' + rm, native_tmp ]
def Platform(name = platform_default()):
    """Select a canned Platform specification.

    Note: the default *name* is evaluated once, at module import time.
    """
    return PlatformSpec(name, platform_module(name).generate)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Layernode-0.0.3.tar.gz/Layernode-0.0.3/layernode/peer_check.py | import time
import uuid
from layernode import blockchain
from layernode import ntwrk
from layernode import tools
from layernode.service import Service, threaded, sync
class PeerCheckService(Service):
def __init__(self, engine, new_peers):
# This logic might change. Here we add new peers while initializing the service
Service.__init__(self, 'peers_check')
self.engine = engine
self.new_peers = []
self.new_peers = new_peers
self.db = None
self.blockchain = None
self.clientdb = None
self.node_id = None
# self.old_peers = []
    def on_register(self):
        """Resolve engine services and seed the peer database.

        Registers every bootstrap peer as 'friend_of_mine' and caches our own
        node id.  Returning True signals successful registration to the
        service framework.
        """
        self.db = self.engine.db
        self.blockchain = self.engine.blockchain
        self.clientdb = self.engine.clientdb
        for peer in self.new_peers:
            self.clientdb.add_peer(peer, 'friend_of_mine')
        self.node_id = self.db.get('node_id')
        return True
    @threaded
    def listen(self):
        """
        Pseudorandomly select a peer to check.
        If blockchain is synchronizing, don't check anyone.
        :return:
        """
        if self.blockchain.get_chain_state() == blockchain.BlockchainService.SYNCING:
            time.sleep(0.1)
            return

        peers = self.clientdb.get_peers()
        if len(peers) > 0:
            # Exponentially-biased random pick favours low indices.
            i = tools.exponential_random(3.0 / 4) % len(peers)
            peer = peers[i]
            t1 = time.time()
            peer_result = self.peer_check(peer)
            t2 = time.time()
            # Decay the previous rank, then add a response-time-weighted term.
            # NOTE(review): the weights suggest lower rank is better — confirm
            # against how clientdb orders peers by rank.
            peer['rank'] *= 0.8
            if peer_result == 1:  # We give them blocks. They do not contribute much information
                peer['rank'] += 0.4 * (t2 - t1)
            elif peer_result == 2:  # We are at the same level. Treat them equal
                peer['rank'] += 0.2 * (t2 - t1)
            elif peer_result == 3:
                # They give us blocks. Increase their rank.
                # If blocks are faulty, they will get punished severely.
                peer['rank'] += 0.1 * (t2 - t1)
            else:
                # Unreachable / failed check: flat penalty.
                peer['rank'] += 0.2 * 30
            self.clientdb.update_peer(peer)

        time.sleep(0.1)
@sync
def peer_check(self, peer):
peer_ip_port = (peer['ip'], peer['port'])
greeted = ntwrk.command(peer_ip_port,
{
'action': 'greetings',
'node_id': self.node_id,
'port': self.engine.config['port']['peers'],
'length': self.db.get('length'),
'identity_length': self.db.get('identity_length'),
'request_length': self.db.get('request_length')
},
self.node_id)
if not isinstance(greeted, dict):
return None
if 'error' in greeted.keys():
return None
peer['request_length'] = greeted['request_length']
peer['identity_length'] = greeted['identity_length']
peer['length'] = greeted['length']
self.clientdb.update_peer(peer)
known_length = self.clientdb.get('known_length')
if greeted['length'] > known_length:
self.clientdb.put('known_length', greeted['length'])
known_identity_length = self.clientdb.get('known_identity_length')
if greeted['identity_length'] > known_identity_length:
self.clientdb.put('known_identity_length',
greeted['identity_length'])
known_request_length = self.clientdb.get('known_request_length')
if greeted['request_length'] > known_request_length:
self.clientdb.put('known_request_length',
greeted['request_length'])
length = self.db.get('length')
identity_length = self.db.get('identity_length')
request_length = self.db.get('request_length')
# This is the most important peer operation part
# We are deciding what to do with this peer. We can either
# send them blocks, share txs or download blocks.
# Only transfer peers at every minute.
peer_history = self.clientdb.get_peer_history(peer['node_id'])
if time.time() - peer_history['peer_transfer'] > 60:
my_peers = self.clientdb.get_peers()
their_peers = ntwrk.command(
peer_ip_port, {'action': 'peers'}, self.node_id)
if type(their_peers) == list:
for p in their_peers:
self.clientdb.add_peer(p, 'friend_of_mine')
for p in my_peers:
ntwrk.command(
peer_ip_port, {'action': 'receive_peer', 'peer': p}, self.node_id)
peer_history['peer_transfer'] = time.time()
self.clientdb.set_peer_history(peer['node_id'], peer_history)
# if greeted['identity_length'] < identity_length:
# self.give_identity(peer_ip_port, greeted['identity_length'])
# return 1
# elif greeted['identity_length'] == identity_length:
# # self.ask_for_txs(peer_ip_port)
# if greeted['length'] < length:
# self.give_score(peer_ip_port, greeted['length'])
# return 1
# elif greeted['length'] == length:
# return 2
# else:
# self.download_scores(
# peer_ip_port, greeted['length'], length, peer['node_id'])
# return 3
# # return 2
# else:
# self.download_identities(
# peer_ip_port, greeted['identity_length'], identity_length, peer['node_id'])
# return 3
if greeted['identity_length'] < identity_length:
self.give_identity(peer_ip_port, greeted['identity_length'])
return 1
elif greeted['identity_length'] == identity_length:
# self.ask_for_txs(peer_ip_port)
if greeted['length'] < length:
self.give_score(peer_ip_port, greeted['length'])
return 1
elif greeted['length'] == length:
if greeted['request_length'] < request_length:
self.give_request(peer_ip_port, greeted['request_length'])
return 1
elif greeted['request_length'] == request_length:
return 2
else:
self.download_requests(
peer_ip_port, greeted['request_length'], request_length, peer['node_id'])
return 3
else:
self.download_scores(
peer_ip_port, greeted['length'], length, peer['node_id'])
return 3
# return 2
else:
self.download_identities(
peer_ip_port, greeted['identity_length'], identity_length, peer['node_id'])
return 3
def download_scores(self, peer_ip_port, score_count_peer, length, node_id):
b = [max(0, length - 10), min(score_count_peer,
length + self.engine.config['peers']['download_limit'])]
scores = ntwrk.command(
peer_ip_port, {'action': 'range_request', 'range': b}, self.node_id)
if isinstance(scores, list):
self.blockchain.scores_queue.put((scores, node_id))
def download_identities(self, peer_ip_port, identity_count_peer, identity_length, node_id):
b = [max(0, identity_length - 10), min(identity_count_peer,
identity_length + self.engine.config['peers']['download_limit'])]
identities = ntwrk.command(
peer_ip_port, {'action': 'identity_range_request', 'range': b}, self.node_id)
if isinstance(identities, list):
self.blockchain.identities_queue.put((identities, node_id))
def download_requests(self, peer_ip_port, request_count_peer, request_length, node_id):
b = [max(0, request_length - 10), min(request_count_peer,
request_length + self.engine.config['peers']['download_limit'])]
_requests = ntwrk.command(
peer_ip_port, {'action': 'request_range_request', 'range': b}, self.node_id)
if isinstance(_requests, list):
self.blockchain.requests_queue.put((_requests, node_id))
def give_score(self, peer_ip_port, score_count_peer):
scores = []
b = [max(score_count_peer - 5, 0), min(self.db.get('length'),
score_count_peer + self.engine.config['peers']['download_limit'])]
for i in range(b[0], b[1]):
scores.append(self.blockchain.get_score(i))
ntwrk.command(
peer_ip_port, {'action': 'push_score', 'scores': scores}, self.node_id)
return 0
def give_identity(self, peer_ip_port, identity_count_peer):
identities = []
b = [max(identity_count_peer - 5, 0), min(self.db.get('identity_length'),
identity_count_peer + self.engine.config['peers']['download_limit'])]
for i in range(b[0], b[1]):
identities.append(self.blockchain.get_identity(i))
ntwrk.command(
peer_ip_port, {'action': 'push_identity', 'identities': identities}, self.node_id)
return 0
def give_request(self, peer_ip_port, request_count_peer):
_requests = []
b = [max(request_count_peer - 5, 0), min(self.db.get('request_length'),
request_count_peer + self.engine.config['peers']['download_limit'])]
for i in range(b[0], b[1]):
_requests.append(self.blockchain.get_request(i))
ntwrk.command(
peer_ip_port, {'action': 'push_request', 'requests': _requests}, self.node_id)
return 0 | PypiClean |
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/domains/complexfield.py |
from __future__ import annotations
import mpmath
from ..core import Float, I
from ..polys.polyerrors import CoercionFailed
from .characteristiczero import CharacteristicZero
from .field import Field
from .mpelements import MPContext
from .simpledomain import SimpleDomain
class ComplexField(CharacteristicZero, SimpleDomain, Field):
    """Complex numbers up to the given precision."""

    rep = 'CC'

    is_ComplexField = True

    is_Exact = False
    is_Numerical = True

    # Default binary precision in bits (IEEE 754 double precision).
    _default_precision = 53

    @property
    def has_default_precision(self):
        # True when this field uses the stock 53-bit precision.
        return self.precision == self._default_precision

    @property
    def precision(self):
        # Binary precision (bits) of the underlying mpmath context.
        return self._context.prec

    @property
    def dps(self):
        # Decimal digits of precision of the underlying mpmath context.
        return self._context.dps

    @property
    def tolerance(self):
        # Tolerance used for approximate comparisons (see almosteq()).
        return self._context.tolerance

    def __new__(cls, prec=_default_precision, dps=None, tol=None):
        """Create a complex field over an mpmath context.

        The element class (``dtype``) is shared between fields with equal
        (precision, tolerance), via the module-level ``_complexes_cache``.
        """
        context = MPContext(prec, dps, tol)
        obj = super().__new__(cls)
        try:
            # Reuse the dtype of an earlier field with identical settings so
            # elements of equal-precision fields share one class.
            obj.dtype = _complexes_cache[(context.prec, context.tolerance)]
        except KeyError:
            _complexes_cache[(context.prec, context.tolerance)] = obj.dtype = context.mpc
        context._parent = obj
        obj._context = context
        obj._hash = hash((cls.__name__, obj.dtype, context.prec, context.tolerance))
        obj.zero = obj.dtype(0)
        obj.one = obj.dtype(1)
        return obj

    def __getnewargs_ex__(self):
        # Pickling support: reconstruct through __new__(prec=..., tol=...).
        return (), {'prec': self.precision,
                    'tol': mpmath.mpf(self.tolerance._mpf_)}

    def __eq__(self, other):
        # Fields compare equal iff both precision and tolerance match.
        return (isinstance(other, ComplexField)
                and self.precision == other.precision
                and self.tolerance == other.tolerance)

    def __hash__(self):
        # Precomputed in __new__ from (class name, dtype, prec, tolerance).
        return self._hash

    def to_expr(self, element):
        """Convert a field element to a Diofant expression."""
        return Float(element.real, self.dps) + I*Float(element.imag, self.dps)

    def from_expr(self, expr):
        """Convert a Diofant expression to an element of this field.

        Raises CoercionFailed if ``expr`` does not evaluate to a number.
        """
        number = expr.evalf(self.dps)
        real, imag = number.as_real_imag()
        if real.is_Number and imag.is_Number:
            return self.dtype(real, imag)
        else:
            raise CoercionFailed(f'expected complex number, got {expr}')

    def _from_PythonIntegerRing(self, element, base):
        return self.dtype(element)

    # Integers, reals and other complex values all coerce the same way.
    _from_GMPYIntegerRing = _from_PythonIntegerRing
    _from_RealField = _from_PythonIntegerRing
    _from_ComplexField = _from_PythonIntegerRing

    def _from_PythonRationalField(self, element, base):
        return self.dtype(element.numerator) / element.denominator

    _from_GMPYRationalField = _from_PythonRationalField

    def _from_AlgebraicField(self, element, base):
        return self.from_expr(base.to_expr(element))

    def get_exact(self):
        """Return the associated exact domain, QQ adjoined with I."""
        from . import QQ
        return QQ.algebraic_field(I)

    def gcd(self, a, b):
        # In a field every nonzero element is a unit, so the gcd is 1.
        return self.one

    def almosteq(self, a, b, tolerance=None):
        """Check if ``a`` and ``b`` are almost equal."""
        return self._context.almosteq(a, b, tolerance)

    def is_normal(self, a):
        return True
# Cache mapping (precision, tolerance) to the element class used by
# ComplexField; filled lazily by ComplexField.__new__.
_complexes_cache: dict[tuple, ComplexField] = {}

# The default (53-bit / double precision) complex field.
CC = ComplexField()
/MagicBus-4.1.2.tar.gz/MagicBus-4.1.2/magicbus/plugins/tasks.py |
import os
import re
import sys
import time
import threading
from magicbus.plugins import SimplePlugin
from magicbus.compat import get_thread_ident, TimerClass
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file
# has "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()  # snapshot of the startup cwd (see comment above)
class PerpetualTimer(TimerClass):
    """A responsive subclass of threading.Timer whose run() method repeats.

    Use this timer only when you really need a very interruptible timer;
    this checks its 'finished' condition up to 20 times a second, which can
    result in pretty high CPU usage.
    """

    def run(self):
        """Invoke self.function every self.interval seconds until cancelled."""
        while True:
            self.finished.wait(self.interval)
            # is_set(): the camelCase isSet() alias is deprecated in Python 3.
            if self.finished.is_set():
                return
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                # threading.Timer defines no 'bus' attribute; dereferencing
                # self.bus unconditionally raised AttributeError and masked
                # the original error.  Only log through a bus if the creator
                # attached one, then re-raise the original exception.
                bus = getattr(self, 'bus', None)
                if bus is not None:
                    bus.log('Error in perpetual timer thread function %r.' %
                            self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise
class BackgroundTask(threading.Thread):
    """A subclass of threading.Thread whose run() method repeats.

    Use this class for most repeating tasks. It uses time.sleep() to wait
    for each interval, which isn't very responsive; that is, even if you call
    self.cancel(), you'll have to wait until the sleep() call finishes before
    the thread stops. To compensate, it defaults to being daemonic, which means
    it won't delay stopping the whole process.
    """

    def __init__(self, interval, function, args=None, kwargs=None, bus=None):
        threading.Thread.__init__(self)
        self.interval = interval
        self.function = function
        # Use None sentinels instead of mutable defaults: with args=[] /
        # kwargs={} all instances created without these arguments shared
        # the same list/dict objects.
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.running = False
        self.bus = bus
        # _set_daemon() below is only honored by Python 2's threading.Thread;
        # on Python 3 the daemon flag must be assigned explicitly, otherwise
        # the task would NOT be daemonic despite the class docstring.
        self.daemon = True

    def cancel(self):
        """Ask the task to stop after the current sleep/callback finishes."""
        self.running = False

    def run(self):
        """Call self.function every self.interval seconds until cancelled."""
        self.running = True
        while self.running:
            # Sleep. Split up so we respond to cancel within one second.
            wholesecs, fracsecs = divmod(self.interval, 1)
            for s in range(int(wholesecs)):
                time.sleep(1)
                if not self.running:
                    return
            if fracsecs:
                time.sleep(fracsecs)
            if not self.running:
                return
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                if self.bus:
                    self.bus.log('Error in background task thread function %r.'
                                 % self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise

    def _set_daemon(self):
        # Kept for compatibility with Python 2's threading.Thread hook.
        return True
class Monitor(SimplePlugin):
    """WSPBus listener to periodically run a callback in its own thread."""

    callback = None
    """The function to call at intervals."""

    frequency = 60
    """The time in seconds between callback runs."""

    thread = None
    """A :class:`BackgroundTask<magicbus.plugins.tasks.BackgroundTask>` thread.
    """

    def __init__(self, bus, callback, frequency=60, name=None):
        SimplePlugin.__init__(self, bus)
        self.callback = callback
        self.frequency = frequency
        self.thread = None
        self.name = name

    def START(self):
        """Start our callback in its own background thread."""
        if self.frequency > 0:
            threadname = self.name or self.__class__.__name__
            if self.thread is None:
                self.thread = BackgroundTask(self.frequency, self.callback,
                                             bus=self.bus)
                # Thread.setName() is deprecated; assign the attribute.
                self.thread.name = threadname
                self.thread.start()
                self.bus.log('Started monitor thread %r.' % threadname)
            else:
                self.bus.log('Monitor thread %r already started.' % threadname)
    START.priority = 70

    def STOP(self):
        """Stop our callback's background task thread."""
        if self.thread is None:
            # Parenthesize the fallback: '%' binds tighter than 'or', so the
            # unparenthesized original evaluated ('...' % self.name) first
            # and logged 'No thread running for None.' when name was unset.
            self.bus.log('No thread running for %s.' %
                         (self.name or self.__class__.__name__))
        else:
            # Never join the monitor thread from inside itself.
            if self.thread is not threading.current_thread():
                name = self.thread.name
                self.thread.cancel()
                if not self.thread.daemon:
                    self.bus.log('Joining %r' % name)
                    self.thread.join()
                self.bus.log('Stopped thread %r.' % name)
            self.thread = None
class Autoreloader(Monitor):
    """Monitor which re-executes the process when files change.

    This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
    if any of the files it monitors change (or is deleted). By default, the
    autoreloader monitors all imported modules; you can add to the
    set by adding to ``autoreload.files``::

        bus.autoreload.files.add(myFile)

    If there are imported files you do *not* wish to monitor, you can adjust
    the ``match`` attribute, a regular expression. For example, to stop
    monitoring the bus itself::

        bus.autoreload.match = r'^(?!magicbus).+'

    Like all :class:`Monitor<magicbus.plugins.tasks.Monitor>` plugins,
    the autoreload plugin takes a ``frequency`` argument. The default is
    1 second; that is, the autoreloader will examine files once each second.
    """

    files = None
    """The set of files to poll for modifications."""

    frequency = 1
    """The interval in seconds at which to poll for modified files."""

    match = '.*'
    """A regular expression by which to match filenames."""

    def __init__(self, bus, frequency=1, match='.*'):
        # mtimes maps filename -> last seen mtime (or None for files that
        # could not be stat'ed, e.g. modules without a .py source file).
        self.mtimes = {}
        self.files = set()
        self.match = match
        Monitor.__init__(self, bus, self.run, frequency)

    def START(self):
        """Start our own background task thread for self.run."""
        if self.thread is None:
            # Forget previously recorded mtimes so a fresh baseline is taken.
            self.mtimes = {}
        Monitor.START(self)
    START.priority = 70

    def sysfiles(self):
        """Return a Set of sys.modules filenames to monitor."""
        files = set()
        for k, m in list(sys.modules.items()):
            if re.match(self.match, k):
                if (
                    hasattr(m, '__loader__') and
                    hasattr(m.__loader__, 'archive')
                ):
                    # Zip-imported module: watch the archive itself.
                    f = m.__loader__.archive
                else:
                    f = getattr(m, '__file__', None)
                if f is not None and not os.path.isabs(f):
                    # ensure absolute paths so a os.chdir() in the app
                    # doesn't break me
                    f = os.path.normpath(
                        os.path.join(_module__file__base, f))
                # NOTE: f may be None here; run() filters falsy names out.
                files.add(f)
        return files

    def run(self):
        """Reload the process if registered files have been modified."""
        for filename in self.sysfiles() | self.files:
            if filename:
                if filename.endswith('.pyc'):
                    # Watch the .py source rather than the compiled file.
                    filename = filename[:-1]
                oldtime = self.mtimes.get(filename, 0)
                if oldtime is None:
                    # Module with no .py file. Skip it.
                    continue
                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # Either a module with no .py file, or it's been deleted.
                    mtime = None
                if filename not in self.mtimes:
                    # First sighting: record a baseline.
                    # If a module has no .py file, this will be None.
                    self.mtimes[filename] = mtime
                else:
                    if mtime is None or mtime > oldtime:
                        # The file has been deleted or modified.
                        self.bus.log('Restarting because %s changed.' %
                                     filename)
                        self.thread.cancel()
                        self.bus.log('Stopped thread %r.' %
                                     self.thread.getName())
                        self.bus.restart()
                        return
class ThreadManager(SimplePlugin):
    """Track worker threads and fire start/stop events for them.

    When your code controls thread lifetimes, publish 'acquire_thread'
    and 'release_thread' from each thread; this plugin then registers or
    unregisters the current thread and triggers the bus's 'start_thread'
    and 'stop_thread' listeners as needed.

    When threads are created and destroyed by code you do not control
    (e.g., Apache), publish only 'acquire_thread' at the beginning of
    each HTTP request and never 'release_thread', since you cannot know
    whether the thread will be re-used. Any threads still registered get
    their 'stop_thread' listeners run when the bus stops.
    """

    threads = None
    """A map of {thread ident: index number} pairs."""

    def __init__(self, bus):
        self.threads = {}
        SimplePlugin.__init__(self, bus)
        # Make sure all four channels exist even before anyone subscribes.
        for channel in ('acquire_thread', 'start_thread',
                        'release_thread', 'stop_thread'):
            self.bus.listeners.setdefault(channel, set())

    def acquire_thread(self):
        """Register the current thread, running 'start_thread' listeners.

        A thread that has been seen before is left alone; its
        'start_thread' listeners do not run a second time.
        """
        ident = get_thread_ident()
        if ident not in self.threads:
            # Platforms may recycle raw thread idents, so hand out a
            # stable sequential index of our own instead.
            index = len(self.threads) + 1
            self.threads[ident] = index
            self.bus.publish('start_thread', index)

    def release_thread(self):
        """Unregister the current thread and run 'stop_thread' listeners."""
        index = self.threads.pop(get_thread_ident(), None)
        if index is not None:
            self.bus.publish('stop_thread', index)

    def STOP(self):
        """Run 'stop_thread' listeners for every registered thread."""
        for index in list(self.threads.values()):
            self.bus.publish('stop_thread', index)
        self.threads.clear()
/DeriveAlive-1.0.1.tar.gz/DeriveAlive-1.0.1/README.md | [](https://travis-ci.com/cs207-group19/cs207-FinalProject.svg?branch=master)
[](https://coveralls.io/github/cs207-group19/cs207-FinalProject?branch=master)
[](https://cs-207-final-project-group-19.readthedocs.io/en/latest/?badge=latest)
Note: if the coverage % seems outdated, one can clear browser cache, or click the badge which links to the coverage site, or run the command `pytest test_DeriveAlive.py` from the `tests/` folder.
# cs207-FinalProject
Repository for CS 207 Final Project (Autodifferentiation), Group 19, Fall 2018
### Group 19
### Group name: DeriveAlive
### Group members: Stephen Slater, Chen Shi, Yue Sun
Welcome to DeriveAlive! This is a package for autodifferentiation. It includes several additional features making use of autodifferentiation, including root-finding, optimization (gradient descent, BFGS, and dataset compatibility for optimizing mean squared error), and quadratic spline fitting. There is extensive documentation at the link below, and the project is publicly available for download via PyPI by running `pip install DeriveAlive`. Enjoy!
#### Documentation: https://cs-207-final-project-group-19.readthedocs.io/en/latest/
#### PyPI: https://pypi.org/project/DeriveAlive/#files
| PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Scanner/Prog.py |
__revision__ = "src/engine/SCons/Scanner/Prog.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
# global, set by --debug=findlibs; when truthy, find_file() reports each
# library lookup attempt.
print_find_libs = None
def ProgramScanner(**kw):
    """Build the prototype Scanner instance that finds static-library
    dependencies of executable files."""
    kw['path_function'] = SCons.Scanner.FindPathDirs('LIBPATH')
    return SCons.Scanner.Base(scan, "ProgramScanner", **kw)
def _subst_libs(env, libs):
    """
    Expand construction variables in *libs* and normalize it to a list.
    """
    if SCons.Util.is_String(libs):
        libs = env.subst(libs)
        # Substitution may yield another string (split on whitespace) or a
        # non-string object (returned unchanged, matching historic behavior).
        return libs.split() if SCons.Util.is_String(libs) else libs
    if SCons.Util.is_Sequence(libs):
        # Flatten nested sequences, substituting each element recursively.
        flattened = []
        for element in libs:
            flattened.extend(_subst_libs(env, element))
        return flattened
    # A single non-string object (a Node, for example).
    return [libs]
def scan(node, env, libpath = ()):
    """
    This scanner scans program files for static-library
    dependencies. It will search the LIBPATH environment variable
    for libraries specified in the LIBS variable, returning any
    files it finds as dependencies.
    """
    try:
        libs = env['LIBS']
    except KeyError:
        # There are no LIBS in this environment, so just return a null list:
        return []
    libs = _subst_libs(env, libs)
    # LIBPREFIXES/LIBSUFFIXES may be scalars or lists; normalize to lists,
    # defaulting to a single empty affix if unset.
    try:
        prefix = env['LIBPREFIXES']
        if not SCons.Util.is_List(prefix):
            prefix = [ prefix ]
    except KeyError:
        prefix = [ '' ]
    try:
        suffix = env['LIBSUFFIXES']
        if not SCons.Util.is_List(suffix):
            suffix = [ suffix ]
    except KeyError:
        suffix = [ '' ]
    # Every (prefix, suffix) combination is tried for each library name.
    pairs = []
    for suf in map(env.subst, suffix):
        for pref in map(env.subst, prefix):
            pairs.append((pref, suf))
    result = []
    if callable(libpath):
        # A path_function (e.g. FindPathDirs) was passed in; evaluate it now.
        libpath = libpath()
    # Hoist attribute lookups out of the search loop.
    find_file = SCons.Node.FS.find_file
    adjustixes = SCons.Util.adjustixes
    for lib in libs:
        if SCons.Util.is_String(lib):
            for pref, suf in pairs:
                # Add prefix/suffix (unless already present), then search
                # the library path; print_find_libs enables lookup tracing.
                l = adjustixes(lib, pref, suf)
                l = find_file(l, libpath, verbose=print_find_libs)
                if l:
                    result.append(l)
        else:
            # Already a Node; depend on it directly.
            result.append(lib)
    return result
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/FALCONN-1.3.1.tar.gz/FALCONN-1.3.1/external/pybind11/docs/release.rst | To release a new version of pybind11:
- Update the version number and push to pypi
- Update ``pybind11/_version.py`` (set release version, remove 'dev').
- Update ``PYBIND11_VERSION_MAJOR`` etc. in ``include/pybind11/detail/common.h``.
- Ensure that all the information in ``setup.py`` is up-to-date.
- Update version in ``docs/conf.py``.
- Tag release date in ``docs/changelog.rst``.
- ``git add`` and ``git commit``.
- if new minor version: ``git checkout -b vX.Y``, ``git push -u origin vX.Y``
- ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``.
- ``git push``
- ``git push --tags``.
- ``python setup.py sdist upload``.
- ``python setup.py bdist_wheel upload``.
- Update conda-forge (https://github.com/conda-forge/pybind11-feedstock) via PR
- download release package from Github: ``wget https://github.com/pybind/pybind11/archive/vX.Y.Z.tar.gz``
- compute checksum: ``shasum -a 256 vX.Y.Z.tar.gz``
- change version number and checksum in ``recipe/meta.yml``
- Get back to work
- Update ``_version.py`` (add 'dev' and increment minor).
- Update version in ``docs/conf.py``
- Update version macros in ``include/pybind11/common.h``
- ``git add`` and ``git commit``.
- ``git push``
| PypiClean |
/Kallithea-0.7.0.tar.gz/Kallithea-0.7.0/kallithea/model/pull_request.py | import datetime
import logging
import re
from tg import request
from tg.i18n import ugettext as _
from kallithea.lib import auth, hooks, webutils
from kallithea.lib.utils import extract_mentioned_users
from kallithea.lib.utils2 import ascii_bytes, short_ref_name
from kallithea.model import changeset_status, comment, db, meta, notification
log = logging.getLogger(__name__)
def _assert_valid_reviewers(seq):
"""Sanity check: elements are actual User objects, and not the default user."""
assert not any(user.is_default_user for user in seq)
class PullRequestModel(object):
    """Operations on pull requests: reviewer management, deletion, closing."""

    def add_reviewers(self, user, pr, reviewers, mention_recipients=None):
        """Add reviewers and send notification to them.

        ``reviewers`` who already review the PR are skipped.  If
        ``mention_recipients`` is given, those users (minus the reviewers)
        receive a mention-style notification instead.  Returns the tuple
        ``(reviewers actually added, redundant reviewers skipped)``.
        """
        reviewers = set(reviewers)
        _assert_valid_reviewers(reviewers)
        if mention_recipients is not None:
            mention_recipients = set(mention_recipients) - reviewers
            _assert_valid_reviewers(mention_recipients)
        # Users that already review this PR must not be added twice.
        redundant_reviewers = set(db.User.query() \
            .join(db.PullRequestReviewer) \
            .filter(db.PullRequestReviewer.pull_request == pr) \
            .filter(db.PullRequestReviewer.user_id.in_(r.user_id for r in reviewers))
            .all())
        if redundant_reviewers:
            log.debug('Following reviewers were already part of pull request %s: %s', pr.pull_request_id, redundant_reviewers)
            reviewers -= redundant_reviewers
        log.debug('Adding reviewers to pull request %s: %s', pr.pull_request_id, reviewers)
        for reviewer in reviewers:
            prr = db.PullRequestReviewer(reviewer, pr)
            meta.Session().add(prr)
        # notification to reviewers
        pr_url = pr.url(canonical=True)
        # Message-ID-like token so mail clients thread PR mails together.
        threading = ['%s-pr-%s@%s' % (pr.other_repo.repo_name,
                                      pr.pull_request_id,
                                      webutils.canonical_hostname())]
        body = pr.description
        _org_ref_type, org_ref_name, _org_rev = pr.org_ref.split(':')
        _other_ref_type, other_ref_name, _other_rev = pr.other_ref.split(':')
        revision_data = [(x.raw_id, x.message)
                         for x in map(pr.org_repo.get_changeset, pr.revisions)]
        # Template context for the notification e-mail.
        email_kwargs = {
            'pr_title': pr.title,
            'pr_title_short': webutils.shorter(pr.title, 50),
            'pr_user_created': user.full_name_and_username,
            'pr_repo_url': webutils.canonical_url('summary_home', repo_name=pr.other_repo.repo_name),
            'pr_url': pr_url,
            'pr_revisions': revision_data,
            'repo_name': pr.other_repo.repo_name,
            'org_repo_name': pr.org_repo.repo_name,
            'pr_nice_id': pr.nice_id(),
            'pr_target_repo': webutils.canonical_url('summary_home',
                               repo_name=pr.other_repo.repo_name),
            'pr_target_branch': other_ref_name,
            'pr_source_repo': webutils.canonical_url('summary_home',
                               repo_name=pr.org_repo.repo_name),
            'pr_source_branch': org_ref_name,
            'pr_owner': pr.owner,
            'pr_owner_username': pr.owner.username,
            'pr_username': user.username,
            'threading': threading,
            'is_mention': False,
        }
        if reviewers:
            notification.NotificationModel().create(created_by=user, body=body,
                                                    recipients=reviewers,
                                                    type_=notification.NotificationModel.TYPE_PULL_REQUEST,
                                                    email_kwargs=email_kwargs)
        if mention_recipients:
            # Same notification, flagged as a mention rather than a review request.
            email_kwargs['is_mention'] = True
            notification.NotificationModel().create(created_by=user, body=body,
                                                    recipients=mention_recipients,
                                                    type_=notification.NotificationModel.TYPE_PULL_REQUEST,
                                                    email_kwargs=email_kwargs)
        return reviewers, redundant_reviewers

    def mention_from_description(self, user, pr, old_description=''):
        """Notify users newly @mentioned in the PR description."""
        # Only users mentioned in the new text but not the old one.
        mention_recipients = (extract_mentioned_users(pr.description) -
                              extract_mentioned_users(old_description))
        log.debug("Mentioning %s", mention_recipients)
        self.add_reviewers(user, pr, set(), mention_recipients)

    def remove_reviewers(self, user, pull_request, reviewers):
        """Remove specified users from being reviewers of the PR."""
        if not reviewers:
            return  # avoid SQLAlchemy warning about empty sequence for IN-predicate
        db.PullRequestReviewer.query() \
            .filter_by(pull_request=pull_request) \
            .filter(db.PullRequestReviewer.user_id.in_(r.user_id for r in reviewers)) \
            .delete(synchronize_session='fetch')  # the default of 'evaluate' is not available

    def delete(self, pull_request):
        """Delete the pull request (and, for Git, its backing ref)."""
        pull_request = db.PullRequest.guess_instance(pull_request)
        meta.Session().delete(pull_request)
        if pull_request.org_repo.scm_instance.alias == 'git':
            # remove a ref under refs/pull/ so that commits can be garbage-collected
            try:
                del pull_request.org_repo.scm_instance._repo[b"refs/pull/%d/head" % pull_request.pull_request_id]
            except KeyError:
                pass

    def close_pull_request(self, pull_request):
        """Mark the pull request as closed and bump its update timestamp."""
        pull_request = db.PullRequest.guess_instance(pull_request)
        pull_request.status = db.PullRequest.STATUS_CLOSED
        pull_request.updated_on = datetime.datetime.now()
class CreatePullRequestAction(object):
    """Command object that validates and creates a new pull request.

    Validation happens in ``__init__`` (raising ``ValidationError``
    subclasses); the database work happens in ``execute()``.
    """

    class ValidationError(Exception):
        """Base class for failures while preparing a pull request."""
        pass

    class Empty(ValidationError):
        """The requested range contains no changesets."""
        pass

    class AmbiguousAncestor(ValidationError):
        """A criss-cross merge makes the common ancestor ambiguous."""
        pass

    class Unauthorized(ValidationError):
        """The acting user may not create this pull request."""
        pass

    @staticmethod
    def is_user_authorized(org_repo, other_repo):
        """Performs authorization check with only the minimum amount of
        information needed for such a check, rather than a full command
        object.
        """
        # Creating a PR requires read access to both repositories.
        if (auth.HasRepoPermissionLevel('read')(org_repo.repo_name) and
            auth.HasRepoPermissionLevel('read')(other_repo.repo_name)
        ):
            return True
        return False

    def __init__(self, org_repo, other_repo, org_ref, other_ref, title, description, owner, reviewers):
        """Resolve refs, compute the changeset range and validate the PR.

        Raises Empty, AmbiguousAncestor or Unauthorized on invalid input.
        """
        reviewers = set(reviewers)
        _assert_valid_reviewers(reviewers)
        # Refs come in as 'type:name:revision' strings.
        (org_ref_type,
         org_ref_name,
         org_rev) = org_ref.split(':')
        org_display = short_ref_name(org_ref_type, org_ref_name)
        if org_ref_type == 'rev':
            # Pin a bare revision to its branch for a stable org ref.
            cs = org_repo.scm_instance.get_changeset(org_rev)
            org_ref = 'branch:%s:%s' % (cs.branch, cs.raw_id)
        (other_ref_type,
         other_ref_name,
         other_rev) = other_ref.split(':')
        if other_ref_type == 'rev':
            cs = other_repo.scm_instance.get_changeset(other_rev)
            other_ref_name = cs.raw_id[:12]
            other_ref = '%s:%s:%s' % (other_ref_type, other_ref_name, cs.raw_id)
        other_display = short_ref_name(other_ref_type, other_ref_name)
        cs_ranges, _cs_ranges_not, ancestor_revs = \
            org_repo.scm_instance.get_diff_changesets(other_rev, org_repo.scm_instance, org_rev) # org and other "swapped"
        if not cs_ranges:
            raise self.Empty(_('Cannot create empty pull request'))
        if not ancestor_revs:
            # Unrelated histories: fall back to the repository's null changeset.
            ancestor_rev = org_repo.scm_instance.EMPTY_CHANGESET
        elif len(ancestor_revs) == 1:
            ancestor_rev = ancestor_revs[0]
        else:
            raise self.AmbiguousAncestor(
                _('Cannot create pull request - criss cross merge detected, please merge a later %s revision to %s')
                % (other_ref_name, org_ref_name))
        self.revisions = [cs_.raw_id for cs_ in cs_ranges]
        # hack: ancestor_rev is not an other_rev but we want to show the
        # requested destination and have the exact ancestor
        other_ref = '%s:%s:%s' % (other_ref_type, other_ref_name, ancestor_rev)
        if not title:
            # Derive a default title from source and destination refs.
            if org_repo == other_repo:
                title = '%s to %s' % (org_display, other_display)
            else:
                title = '%s#%s to %s#%s' % (org_repo.repo_name, org_display,
                                            other_repo.repo_name, other_display)
        description = description or _('No description')
        self.org_repo = org_repo
        self.other_repo = other_repo
        self.org_ref = org_ref
        self.org_rev = org_rev
        self.other_ref = other_ref
        self.title = title
        self.description = description
        self.owner = owner
        self.reviewers = reviewers
        if not CreatePullRequestAction.is_user_authorized(self.org_repo, self.other_repo):
            raise self.Unauthorized(_('You are not authorized to create the pull request'))

    def execute(self):
        """Persist the pull request, set its initial review status and
        notify reviewers; returns the created PullRequest."""
        created_by = db.User.get(request.authuser.user_id)
        pr = db.PullRequest()
        pr.org_repo = self.org_repo
        pr.org_ref = self.org_ref
        pr.other_repo = self.other_repo
        pr.other_ref = self.other_ref
        pr.revisions = self.revisions
        pr.title = self.title
        pr.description = self.description
        pr.owner = self.owner
        meta.Session().add(pr)
        meta.Session().flush()  # make database assign pull_request_id
        if self.org_repo.scm_instance.alias == 'git':
            # create a ref under refs/pull/ so that commits don't get garbage-collected
            self.org_repo.scm_instance._repo[b"refs/pull/%d/head" % pr.pull_request_id] = ascii_bytes(self.org_rev)
        # reset state to under-review
        new_comment = comment.ChangesetCommentsModel().create(
            text='',
            repo=self.org_repo,
            author=created_by,
            pull_request=pr,
            send_email=False,
            status_change=db.ChangesetStatus.STATUS_UNDER_REVIEW,
        )
        changeset_status.ChangesetStatusModel().set_status(
            self.org_repo,
            db.ChangesetStatus.STATUS_UNDER_REVIEW,
            created_by,
            new_comment,
            pull_request=pr,
        )
        # Users @mentioned in the description get a mention notification.
        mention_recipients = extract_mentioned_users(self.description)
        PullRequestModel().add_reviewers(created_by, pr, self.reviewers, mention_recipients)
        hooks.log_create_pullrequest(pr.get_dict(), created_by)
        return pr
class CreatePullRequestIterationAction(object):
    @staticmethod
    def is_user_authorized(old_pull_request):
        """Performs authorization check with only the minimum amount of
        information needed for such a check, rather than a full command
        object.
        """
        # Site admins may always iterate.
        if auth.HasPermissionAny('hg.admin')():
            return True
        # Authorized to edit the old PR?
        if request.authuser.user_id != old_pull_request.owner_id:
            return False
        # Authorized to create a new PR?
        if not CreatePullRequestAction.is_user_authorized(old_pull_request.org_repo, old_pull_request.other_repo):
            return False
        return True
def __init__(self, old_pull_request, new_org_rev, new_other_rev, title, description, owner, reviewers):
self.old_pull_request = old_pull_request
org_repo = old_pull_request.org_repo
org_ref_type, org_ref_name, org_rev = old_pull_request.org_ref.split(':')
other_repo = old_pull_request.other_repo
other_ref_type, other_ref_name, other_rev = old_pull_request.other_ref.split(':') # other_rev is ancestor
#assert other_ref_type == 'branch', other_ref_type # TODO: what if not?
new_org_ref = '%s:%s:%s' % (org_ref_type, org_ref_name, new_org_rev)
new_other_ref = '%s:%s:%s' % (other_ref_type, other_ref_name, new_other_rev)
self.create_action = CreatePullRequestAction(org_repo, other_repo, new_org_ref, new_other_ref, None, None, owner, reviewers)
# Generate complete title/description
old_revisions = set(old_pull_request.revisions)
revisions = self.create_action.revisions
new_revisions = [r for r in revisions if r not in old_revisions]
lost = old_revisions.difference(revisions)
infos = ['This is a new iteration of %s "%s".' %
(webutils.canonical_url('pullrequest_show', repo_name=old_pull_request.other_repo.repo_name,
pull_request_id=old_pull_request.pull_request_id),
old_pull_request.title)]
if lost:
infos.append(_('Missing changesets since the previous iteration:'))
for r in old_pull_request.revisions:
if r in lost:
rev_desc = org_repo.get_changeset(r).message.split('\n')[0]
infos.append(' %s %s' % (r[:12], rev_desc))
if new_revisions:
infos.append(_('New changesets on %s %s since the previous iteration:') % (org_ref_type, org_ref_name))
for r in reversed(revisions):
if r in new_revisions:
rev_desc = org_repo.get_changeset(r).message.split('\n')[0]
infos.append(' %s %s' % (r[:12], webutils.shorter(rev_desc, 80)))
if self.create_action.other_ref == old_pull_request.other_ref:
infos.append(_("Ancestor didn't change - diff since previous iteration:"))
infos.append(webutils.canonical_url('compare_url',
repo_name=org_repo.repo_name, # other_repo is always same as repo_name
org_ref_type='rev', org_ref_name=org_rev[:12], # use old org_rev as base
other_ref_type='rev', other_ref_name=new_org_rev[:12],
)) # note: linear diff, merge or not doesn't matter
else:
infos.append(_('This iteration is based on another %s revision and there is no simple diff.') % other_ref_name)
else:
infos.append(_('No changes found on %s %s since previous iteration.') % (org_ref_type, org_ref_name))
# TODO: fail?
v = 2
m = re.match(r'(.*)\(v(\d+)\)\s*$', title)
if m is not None:
title = m.group(1)
v = int(m.group(2)) + 1
self.create_action.title = '%s (v%s)' % (title.strip(), v)
# using a mail-like separator, insert new iteration info in description with latest first
descriptions = description.replace('\r\n', '\n').split('\n-- \n', 1)
description = descriptions[0].strip() + '\n\n-- \n' + '\n'.join(infos)
if len(descriptions) > 1:
description += '\n\n' + descriptions[1].strip()
self.create_action.description = description
if not CreatePullRequestIterationAction.is_user_authorized(self.old_pull_request):
raise CreatePullRequestAction.Unauthorized(_('You are not authorized to create the pull request'))
def execute(self):
pull_request = self.create_action.execute()
# Close old iteration
comment.ChangesetCommentsModel().create(
text=_('Closed, next iteration: %s .') % pull_request.url(canonical=True),
repo=self.old_pull_request.other_repo_id,
author=request.authuser.user_id,
pull_request=self.old_pull_request.pull_request_id,
closing_pr=True)
PullRequestModel().close_pull_request(self.old_pull_request.pull_request_id)
return pull_request | PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/js/jquery/ui/minified/jquery.ui.position.min.js | (function(c){c.ui=c.ui||{};var n=/left|center|right/,o=/top|center|bottom/,t=c.fn.position,u=c.fn.offset;c.fn.position=function(b){if(!b||!b.of)return t.apply(this,arguments);b=c.extend({},b);var a=c(b.of),d=a[0],g=(b.collision||"flip").split(" "),e=b.offset?b.offset.split(" "):[0,0],h,k,j;if(d.nodeType===9){h=a.width();k=a.height();j={top:0,left:0}}else if(d.setTimeout){h=a.width();k=a.height();j={top:a.scrollTop(),left:a.scrollLeft()}}else if(d.preventDefault){b.at="left top";h=k=0;j={top:b.of.pageY,
left:b.of.pageX}}else{h=a.outerWidth();k=a.outerHeight();j=a.offset()}c.each(["my","at"],function(){var f=(b[this]||"").split(" ");if(f.length===1)f=n.test(f[0])?f.concat(["center"]):o.test(f[0])?["center"].concat(f):["center","center"];f[0]=n.test(f[0])?f[0]:"center";f[1]=o.test(f[1])?f[1]:"center";b[this]=f});if(g.length===1)g[1]=g[0];e[0]=parseInt(e[0],10)||0;if(e.length===1)e[1]=e[0];e[1]=parseInt(e[1],10)||0;if(b.at[0]==="right")j.left+=h;else if(b.at[0]==="center")j.left+=h/2;if(b.at[1]==="bottom")j.top+=
k;else if(b.at[1]==="center")j.top+=k/2;j.left+=e[0];j.top+=e[1];return this.each(function(){var f=c(this),l=f.outerWidth(),m=f.outerHeight(),p=parseInt(c.curCSS(this,"marginLeft",true))||0,q=parseInt(c.curCSS(this,"marginTop",true))||0,v=l+p+(parseInt(c.curCSS(this,"marginRight",true))||0),w=m+q+(parseInt(c.curCSS(this,"marginBottom",true))||0),i=c.extend({},j),r;if(b.my[0]==="right")i.left-=l;else if(b.my[0]==="center")i.left-=l/2;if(b.my[1]==="bottom")i.top-=m;else if(b.my[1]==="center")i.top-=
m/2;i.left=Math.round(i.left);i.top=Math.round(i.top);r={left:i.left-p,top:i.top-q};c.each(["left","top"],function(s,x){c.ui.position[g[s]]&&c.ui.position[g[s]][x](i,{targetWidth:h,targetHeight:k,elemWidth:l,elemHeight:m,collisionPosition:r,collisionWidth:v,collisionHeight:w,offset:e,my:b.my,at:b.at})});c.fn.bgiframe&&f.bgiframe();f.offset(c.extend(i,{using:b.using}))})};c.ui.position={fit:{left:function(b,a){var d=c(window);d=a.collisionPosition.left+a.collisionWidth-d.width()-d.scrollLeft();b.left=
d>0?b.left-d:Math.max(b.left-a.collisionPosition.left,b.left)},top:function(b,a){var d=c(window);d=a.collisionPosition.top+a.collisionHeight-d.height()-d.scrollTop();b.top=d>0?b.top-d:Math.max(b.top-a.collisionPosition.top,b.top)}},flip:{left:function(b,a){if(a.at[0]!=="center"){var d=c(window);d=a.collisionPosition.left+a.collisionWidth-d.width()-d.scrollLeft();var g=a.my[0]==="left"?-a.elemWidth:a.my[0]==="right"?a.elemWidth:0,e=a.at[0]==="left"?a.targetWidth:-a.targetWidth,h=-2*a.offset[0];b.left+=
a.collisionPosition.left<0?g+e+h:d>0?g+e+h:0}},top:function(b,a){if(a.at[1]!=="center"){var d=c(window);d=a.collisionPosition.top+a.collisionHeight-d.height()-d.scrollTop();var g=a.my[1]==="top"?-a.elemHeight:a.my[1]==="bottom"?a.elemHeight:0,e=a.at[1]==="top"?a.targetHeight:-a.targetHeight,h=-2*a.offset[1];b.top+=a.collisionPosition.top<0?g+e+h:d>0?g+e+h:0}}}};if(!c.offset.setOffset){c.offset.setOffset=function(b,a){if(/static/.test(c.curCSS(b,"position")))b.style.position="relative";var d=c(b),
g=d.offset(),e=parseInt(c.curCSS(b,"top",true),10)||0,h=parseInt(c.curCSS(b,"left",true),10)||0;g={top:a.top-g.top+e,left:a.left-g.left+h};"using"in a?a.using.call(b,g):d.css(g)};c.fn.offset=function(b){var a=this[0];if(!a||!a.ownerDocument)return null;if(b)return this.each(function(){c.offset.setOffset(this,b)});return u.call(this)}}})(jQuery); | PypiClean |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.utilities import geometric_key
import Rhino
import scriptcontext as sc
__all__ = ["volmesh_from_polysurfaces"]
def volmesh_from_polysurfaces(cls, guids, precision=None):
    """Construct a volumetric mesh from given polysurfaces.

    Parameters
    ----------
    cls : :class:`~compas.datastructures.VolMesh`
        The class of volmesh.
    guids : sequence[str | System.Guid]
        The *globally unique identifiers* of the polysurfaces.
    precision: string
        Precision of the polysurface connectivity.

    Returns
    -------
    :class:`~compas.datastructures.Volmesh`
        The volumetric mesh object.

    Notes
    -----
    Essentially, this function does the following:

    * find each of the polysurfaces and check if they have a boundary representation (b-rep)
    * convert to b-rep and extract the edge loops
    * make a face of each loop by referring to vertices using their geometric keys
    * add a cell per brep
    * and add the faces of a brep to the cell
    * create a volmesh from the found vertices and cells

    """
    gkey_xyz = {}
    cells = []
    for guid in guids:
        cell = []
        obj = sc.doc.Objects.Find(guid)
        # objects without a b-rep form cannot contribute a cell
        if not obj.Geometry.HasBrepForm:
            continue
        brep = Rhino.Geometry.Brep.TryConvertBrep(obj.Geometry)
        for loop in brep.Loops:
            curve = loop.To3dCurve()
            segments = curve.Explode()
            face = []
            sp = segments[0].PointAtStart
            ep = segments[0].PointAtEnd
            sp_gkey = geometric_key(sp, precision)
            ep_gkey = geometric_key(ep, precision)
            gkey_xyz[sp_gkey] = sp
            gkey_xyz[ep_gkey] = ep
            face.append(sp_gkey)
            face.append(ep_gkey)
            # skip the last segment: the loop is closed, so its end point
            # coincides with the start point already recorded above
            for segment in segments[1:-1]:
                ep = segment.PointAtEnd
                ep_gkey = geometric_key(ep, precision)
                face.append(ep_gkey)
                gkey_xyz[ep_gkey] = ep
            cell.append(face)
        cells.append(cell)
    # map each geometric key to a vertex index, then express faces by index
    gkey_index = dict((gkey, index) for index, gkey in enumerate(gkey_xyz))
    vertices = [list(xyz) for gkey, xyz in gkey_xyz.items()]
    cells = [[[gkey_index[gkey] for gkey in face] for face in cell] for cell in cells]
    return cls.from_vertices_and_cells(vertices, cells)
# lotlan_scheduler/api/materialflow.py
# standard libraries
import uuid
import networkx as nx
import matplotlib.pyplot as plt
# local sources
from lotlan_scheduler.api.event import Event
from lotlan_scheduler.api.transportorder import TransportOrder
from lotlan_scheduler.logger.sqlite_logger import SQLiteLogger
from lotlan_scheduler.petri_net.logic import PetriNetLogic
from lotlan_scheduler.petri_net.generator import PetriNetGenerator
# globals defines
from lotlan_scheduler.defines import PetriNetConstants, LogicConstants
class MaterialFlowCallbacks(object):
    """
    Container for the callback functions registered for the different
    states in the scheduling process (one list of callables per state).
    """
    def __init__(self):
        # One empty registration list per observable scheduling state.
        for state_attr in ("triggered_by_cb", "pickup_finished_cb",
                           "delivery_finished_cb", "finished_by_cb",
                           "next_to_cb", "task_finished_cb",
                           "all_finished_cb"):
            setattr(self, state_attr, [])
class MaterialFlow():
""" Represents an abstract materialflow """
    def __init__(self, _uuid, lotlan_structure, lotlan_string, tasks_in_mf, test_flag=False):
        """Store the materialflow inputs and prepare empty bookkeeping.

        Heavy initialization (logger, call graph, petri net logic) is
        deferred to start().
        """
        self._uuid = _uuid
        self.name = ""
        self._is_running = True
        self.materialflow_callbacks = MaterialFlowCallbacks()
        self.tasks_in_mf = tasks_in_mf
        self.lotlan_structure = lotlan_structure
        # bookkeeping dicts, filled in initialize_tasks()/start()
        self.tasks = {}
        self.ids = {}
        self.triggered_by_events = {}
        self.finished_by_events = {}
        self.event_instances = {}
        self.not_done_parents = {} # 0 if all parent tasks are done
        self.tasks_done = {}
        self.test_flag = test_flag
        self.parent_count = {}
        self.lotlan_string = lotlan_string
        self.petri_net_generator = PetriNetGenerator(tasks_in_mf,
                                                     self.event_instances,
                                                     test_flag=test_flag)
        # set lazily in start()
        self.logger = None
        self.call_graph = None
        self.startable_tasks = None
        self.cycle_in_call_graph = None
        self.petri_net_logic = None
def is_running(self):
return self._is_running
    def start(self):
        """
        Starts the materialflow scheduling: sets up logging, builds the
        call graph, generates the petri nets and kicks off the tasks
        that have no unfinished parents.
        """
        self.logger = SQLiteLogger()
        self.logger.insert_materialflow_in_sql(self._uuid, self.lotlan_string)
        self.initialize_tasks(self.tasks_in_mf)
        # the materialflow is named after its first task
        if self.tasks_in_mf:
            self.name = self.tasks_in_mf[0].name
        self.call_graph = self.create_call_graph(self.tasks_in_mf)
        # a cycle means the flow never terminates on its own (see all_tasks_done)
        cycles = list(nx.simple_cycles(self.call_graph))
        self.cycle_in_call_graph = len(cycles) > 0
        self.startable_tasks = self.find_startable_tasks(self.call_graph, self.tasks_in_mf)
        # collect all Event instances declared in the LoTLan structure
        for instance in self.lotlan_structure.instances.values():
            if instance.template_name == "Event":
                self.event_instances[instance.name] = instance
        task_representations = self.petri_net_generator.generate_task_nets()
        self.petri_net_logic = PetriNetLogic(task_representations, self.test_flag)
        self.create_event_information_list()
        self.start_tasks(self.startable_tasks)
    def start_tasks(self, tasks):
        """
        Starts scheduling of the given tasks:
        if a task has a triggeredBy statement it waits for incoming events,
        otherwise the transport_order can be executed and so next_to is called.
        """
        next_tos = []
        for task in tasks:
            uuid_ = self.ids[task.name]
            transport_order = task.transport_order
            pickup = transport_order.pickup_tos.location
            delivery = transport_order.delivery_tos.location
            transport_order.state = TransportOrder.TransportOrderState.TASK_STARTED
            state = transport_order.state
            # log the TASK_STARTED transition
            self.logger.insert_transport_order(self._uuid, uuid_, state, pickup, delivery)
            if self.triggered_by_events[task.name]:
                # task has a TriggeredBy: arm the petri net and notify listeners
                tb_events_of_task = self.triggered_by_events[task.name]
                self.petri_net_logic.set_awaited_events(task, tb_events_of_task)
                self.wait_for_triggered_by(uuid_, self.triggered_by_events[task.name])
                transport_order.state = TransportOrder.TransportOrderState.TASK_WAIT_FOR_TRIGGERED_BY
                state = transport_order.state
                self.logger.insert_transport_order(self._uuid, uuid_, state, pickup, delivery)
            else:
                # no TriggeredBy: mark the task net as started right away
                task_started_event = Event(PetriNetConstants.TASK_STARTED_PLACE, "", "Boolean",
                                           comparator="", value=True)
                self.petri_net_logic.set_awaited_events(task, [task_started_event])
                self.fire_event(uuid_, task_started_event)
                next_tos.append(task)
        self.next_to(next_tos)
def create_call_graph(self, tasks):
"""
Creates a graph where every node is a task
and a directed edge represents an onDone
"""
call_graph = nx.DiGraph()
for task in tasks:
call_graph.add_node(task.name)
for child in task.on_done:
call_graph.add_edge(task.name, child)
return call_graph
    def save_call_graph_img(self, filename):
        """ Saves the generated call graph of the materialflow as image """
        # draws into matplotlib's current global figure as a side effect
        nx.draw(self.call_graph, with_labels=True)
        plt.savefig(filename, dpi=300, bbox_inches="tight")
def find_startable_tasks(self, graph, tasks):
"""
Finds tasks that can be started:
task with no incoming edges in graph
"""
startable_tasks = []
for task in tasks:
incoming_edges = 0
for u, v in graph.in_edges(task.name):
# ignore self loops
if u != v:
incoming_edges = incoming_edges + 1
else:
self.not_done_parents[task.name] = 1
self.not_done_parents[task.name] = self.not_done_parents[task.name] + incoming_edges
self.parent_count[task.name] = self.not_done_parents[task.name]
if incoming_edges == 0:
startable_tasks.append(task)
return startable_tasks
def fire_event(self, uuid_, event):
""" Fires event to petri net corresponding to task of uuid """
task = self.tasks[str(uuid_)]
self.petri_net_logic.fire_event(task, event, self.on_petri_net_response)
    def initialize_tasks(self, tasks):
        """ Adds information for api classes to tasks and init dicts """
        for i, task in enumerate(tasks):
            # deterministic ids in test mode, random uuids otherwise
            if self.test_flag:
                uuid_ = i
            else:
                uuid_ = uuid.uuid4()
            self.tasks[str(uuid_)] = task
            self.ids[task.name] = uuid_
            self.tasks_done[task.name] = False
            self.not_done_parents[task.name] = 0
            transport_order = task.transport_order
            transport_order.uuid = uuid_
            pickup_tos = transport_order.pickup_tos
            delivery_tos = transport_order.delivery_tos
            # resolve logical location names to the physical name/type
            # declared in the LoTLan Location instances
            for instance in self.lotlan_structure.instances.values():
                if instance.template_name == "Location":
                    if pickup_tos.location.logical_name == instance.name:
                        pickup_tos.location.physical_name = instance.keyval["name"]
                        pickup_tos.location.location_type = instance.keyval["type"]
                    elif delivery_tos.location.logical_name == instance.name:
                        delivery_tos.location.physical_name = instance.keyval["name"]
                        delivery_tos.location.location_type = instance.keyval["type"]
def create_event_information_list(self):
""" Creates a list of events objects out of the event names """
for task in self.tasks_in_mf:
triggered_by = []
for event_name in task.triggered_by_events:
logical_name = self.event_instances[event_name].name
physical_name = self.event_instances[event_name].keyval["name"]
event_type = self.event_instances[event_name].keyval["type"]
triggered_by.append(Event(logical_name, physical_name, event_type, None, None))
self.triggered_by_events[task.name] = triggered_by
finished_by = []
for event_name in task.finished_by_events:
logical_name = self.event_instances[event_name].name
physical_name = self.event_instances[event_name].keyval["name"]
event_type = self.event_instances[event_name].keyval["type"]
finished_by.append(Event(logical_name, physical_name, event_type, None, None))
self.finished_by_events[task.name] = finished_by
def on_petri_net_response(self, msg, task):
"""
Handles incoming messages from the petri net logic and
calls corresponding methods
"""
if msg == LogicConstants.TRIGGERED_BY_PASSED_MSG:
self.next_to([task])
elif msg == LogicConstants.TOS_TB_PASSED_MSG:
self.on_tos_tb_passed(task)
elif msg == LogicConstants.TOS_WAIT_FOR_ACTION:
self.on_tos_wait_for_action(task)
elif msg == LogicConstants.TOS_FINISHED_MSG:
self.on_tos_finished(task)
elif msg == LogicConstants.TO_DONE_MSG:
self.on_to_done(task)
elif msg == LogicConstants.TASK_FINISHED_MSG:
self.on_task_finished(task)
    def next_to(self, task_info):
        """ Notifies listeners about the next transport orders and set petri net state """
        if task_info:
            transport_orders = {}
            for task in task_info:
                uid = self.ids[task.name]
                transport_order = task.transport_order
                task.transport_order.state = TransportOrder.TransportOrderState.PICKUP_STARTED
                transport_orders[uid] = transport_order
                # arm the task net to wait for the AGV's to_done signal
                to_done_event = Event("to_done", "", "Boolean",
                                      comparator="", value=True)
                self.petri_net_logic.set_awaited_events(task, [to_done_event])
                self.start_tos(task, transport_order.pickup_tos)
            # notify listeners once with all started transport orders
            for callback in self.materialflow_callbacks.next_to_cb:
                callback(self._uuid, transport_orders)
def start_tos(self, task, tos, pickup=True):
""" Starts scheduling of the given TransportOrderStep """
if tos.triggered_by:
self.petri_net_logic.set_awaited_events(task, tos.triggered_by)
if pickup:
task.transport_order.state = TransportOrder.TransportOrderState.PICKUP_WAIT_FOR_TRIGGERED_BY
else:
task.transport_order.state = TransportOrder.TransportOrderState.DELIVERY_WAIT_FOR_TRIGGERED_BY
uid = self.ids[task.name]
self.log_transport_order(uid, task.transport_order)
else:
tos_started_event = Event(PetriNetConstants.TOS_STARTED_PLACE, "", "Boolean", value=True)
tos_done_event = Event(PetriNetConstants.TOS_MOVED_TO_LOCATION_PLACE, "", "Boolean", True)
self.petri_net_logic.set_awaited_events(task, [tos_started_event, tos_done_event])
self.petri_net_logic.fire_event(task, tos_started_event)
    def on_tos_tb_passed(self, task):
        """
        Gets called when a TriggeredBy is passed in a TransportOrderStep net.
        Sets the petri net state and sets the new awaited event
        "moved_to_location" for either the Pickup net or the Delivery net
        depending on the current state.
        """
        current_state = task.transport_order.state
        uid = self.ids[task.name]
        transport_order = task.transport_order
        # check the current state and set the new one
        if current_state == TransportOrder.TransportOrderState.PICKUP_WAIT_FOR_TRIGGERED_BY:
            task.transport_order.state = TransportOrder.TransportOrderState.PICKUP_STARTED
        elif current_state == TransportOrder.TransportOrderState.DELIVERY_WAIT_FOR_TRIGGERED_BY:
            task.transport_order.state = TransportOrder.TransportOrderState.DELIVERY_STARTED
        self.log_transport_order(uid, transport_order)
        # next awaited signal: the AGV arriving at the step's location
        moved_to_locaction_event = Event("moved_to_location", "", "Boolean", value=True)
        self.petri_net_logic.set_awaited_events(task, [moved_to_locaction_event])
def on_tos_wait_for_action(self, task):
"""
Gets called when the AGV has moved to the Location.
Set the petri net state and set FinishedBy events as awaited events.
"""
current_state = task.transport_order.state
tos = None
uid = self.ids[task.name]
transport_order = task.transport_order
# check the current state and set the new one
if current_state == TransportOrder.TransportOrderState.PICKUP_STARTED:
task.transport_order.state = TransportOrder.TransportOrderState.WAIT_FOR_LOADING
tos = task.transport_order.pickup_tos
elif current_state == TransportOrder.TransportOrderState.DELIVERY_STARTED:
task.transport_order.state = TransportOrder.TransportOrderState.WAIT_FOR_UNLOADING
tos = task.transport_order.delivery_tos
else:
print("Something went wrong!")
self.log_transport_order(uid, transport_order)
self.petri_net_logic.set_awaited_events(task, tos.finished_by)
    def on_tos_finished(self, task):
        """
        Gets called when a TransportOrderStep is done.
        Sets the petri net state and either calls on_pickup_finished
        or on_delivery_finished depending on the current state.
        """
        current_state = task.transport_order.state
        uid = self.ids[task.name]
        transport_order = task.transport_order
        # check the current state and set the new one
        if current_state == TransportOrder.TransportOrderState.WAIT_FOR_LOADING:
            task.transport_order.state = TransportOrder.TransportOrderState.PICKUP_FINISHED
            self.log_transport_order(uid, transport_order)
            self.on_pickup_finished(task)
        elif current_state == TransportOrder.TransportOrderState.WAIT_FOR_UNLOADING:
            task.transport_order.state = TransportOrder.TransportOrderState.DELIVERY_FINISHED
            self.log_transport_order(uid, transport_order)
            self.on_delivery_finished(task)
        else:
            # unexpected state: nothing to advance
            print("Something went wrong!")
def on_pickup_finished(self, task):
"""
Gets called when the Pickup TransportOrderStep is finished.
Set petri net state and start Delivery TransportOrderStep
"""
self.pickup_finished(self.ids[task.name])
task.transport_order.state = TransportOrder.TransportOrderState.DELIVERY_STARTED
self.start_tos(task, task.transport_order.delivery_tos, False)
    def on_delivery_finished(self, task):
        """
        Gets called when the Delivery TransportOrderStep is finished.
        Sets the petri net state and fires the to_done event to the task net.
        """
        self.delivery_finished(self.ids[task.name])
        # complete the transport order in the task net
        to_done_event = Event("to_done", "", "Boolean", value=True)
        self.petri_net_logic.set_awaited_events(task, [to_done_event])
        self.petri_net_logic.fire_event(task, to_done_event, self.on_petri_net_response)
    def on_to_done(self, task_info):
        """
        Gets called when the transport order is done by the AGV.
        Sets the petri net state (wait for possible FinishedBy events).
        """
        uid = self.ids[task_info.name]
        # only tasks with a FinishedBy need to keep waiting here
        if self.finished_by_events[task_info.name]:
            transport_order = task_info.transport_order
            transport_order.state = TransportOrder.TransportOrderState.TASK_WAIT_FOR_FINISHED_BY
            self.log_transport_order(uid, transport_order)
            finished_by_events = self.finished_by_events[task_info.name]
            self.petri_net_logic.set_awaited_events(task_info, finished_by_events)
            self.wait_for_finished_by(uid, self.finished_by_events[task_info.name])
    def on_task_finished(self, task_info):
        """
        Gets called when a task is finished.
        Starts possible onDone tasks and sets the petri net state.
        """
        uid = self.ids[task_info.name]
        self.task_finished(uid)
        self.tasks_done[task_info.name] = True
        # the finished task's net awaits nothing anymore
        self.petri_net_logic.set_awaited_events(task_info, [None])
        task_info.transport_order.state = TransportOrder.TransportOrderState.FINISHED
        self.log_transport_order(uid, task_info.transport_order)
        if task_info.on_done:
            startable_tasks = []
            for task in task_info.on_done:
                self.not_done_parents[task] = self.not_done_parents[task] - 1
                # all parent tasks are done start the task
                if self.not_done_parents[task] == 0:
                    task_key = self.tasks[str(self.ids[task])]
                    startable_tasks.append(task_key)
                    # reset the counter so the child can run again in a cycle
                    self.not_done_parents[task_key.name] = self.parent_count[task_key.name]
            self.start_tasks(startable_tasks)
        elif self.all_tasks_done():
            self._is_running = False
            self.all_finished()
def all_tasks_done(self):
"""
Returns true if all tasks are done
"""
if self.cycle_in_call_graph is False:
for task_done in self.tasks_done.values():
if task_done is False:
return False
return True
return False
def log_transport_order(self, to_uuid, transport_order):
"""
Saves the given TransportOrder with its locations in the db
by calling insert method of the logger
"""
pickup_location = transport_order.pickup_tos.location
delivery_location = transport_order.delivery_tos.location
self.logger.insert_transport_order(self._uuid, to_uuid, transport_order.state,
pickup_location, delivery_location)
    def wait_for_triggered_by(self, uuid_, event_information):
        """Notify listeners that task `uuid_` waits for its TriggeredBy events."""
        for callback in self.materialflow_callbacks.triggered_by_cb:
            callback(self._uuid, uuid_, event_information)
    def wait_for_finished_by(self, uuid_, event_information):
        """Notify listeners that task `uuid_` waits for its FinishedBy events."""
        for callback in self.materialflow_callbacks.finished_by_cb:
            callback(self._uuid, uuid_, event_information)
    def task_finished(self, uuid_):
        """Notify listeners that task `uuid_` has finished."""
        for callback in self.materialflow_callbacks.task_finished_cb:
            callback(self._uuid, uuid_)
    def all_finished(self):
        """Notify listeners that the whole materialflow has finished."""
        for callback in self.materialflow_callbacks.all_finished_cb:
            callback(self._uuid)
    def pickup_finished(self, uuid_):
        """Notify listeners that the pickup step of task `uuid_` is done."""
        for callback in self.materialflow_callbacks.pickup_finished_cb:
            callback(self._uuid, uuid_)
    def delivery_finished(self, uuid_):
        """Notify listeners that the delivery step of task `uuid_` is done."""
        for callback in self.materialflow_callbacks.delivery_finished_cb:
            callback(self._uuid, uuid_)
    def register_callback_triggered_by(self, callback):
        """
        If a Task can be started and has a TriggeredBy defined, all
        registered callback functions will be called
        """
        # membership check keeps each callback registered only once
        if callback not in self.materialflow_callbacks.triggered_by_cb:
            self.materialflow_callbacks.triggered_by_cb.append(callback)
    def register_callback_next_to(self, callback):
        """
        If a Task was started and the TriggeredBy condition is satisfied or there is
        no TriggeredBy all callback functions registered here will be called
        """
        # membership check keeps each callback registered only once
        if callback not in self.materialflow_callbacks.next_to_cb:
            self.materialflow_callbacks.next_to_cb.append(callback)
    def register_callback_finished_by(self, callback):
        """
        Functions passed in to this method will be called when the TransportOrder is done
        which means a "to_done" event was sent and a FinishedBy was defined
        """
        # membership check keeps each callback registered only once
        if callback not in self.materialflow_callbacks.finished_by_cb:
            self.materialflow_callbacks.finished_by_cb.append(callback)
    def register_callback_task_finished(self, callback):
        """
        If a Task is finished functions registered here are being called.
        """
        # membership check keeps each callback registered only once
        if callback not in self.materialflow_callbacks.task_finished_cb:
            self.materialflow_callbacks.task_finished_cb.append(callback)
    def register_callback_all_finished(self, callback):
        """
        If all Tasks in a Materialflow are finished functions registered here are being called
        """
        # membership check keeps each callback registered only once
        if callback not in self.materialflow_callbacks.all_finished_cb:
            self.materialflow_callbacks.all_finished_cb.append(callback)
    def register_callback_pickup_finished(self, callback):
        """
        Functions passed in to this method will be called when the Pickup TransportOrderStep
        of a task is finished
        """
        # membership check keeps each callback registered only once
        if callback not in self.materialflow_callbacks.pickup_finished_cb:
            self.materialflow_callbacks.pickup_finished_cb.append(callback)
def register_callback_delivery_finished(self, callback):
"""
Functions passed in to this method will be called when the Delivery TransportOrderStep
of a task is finished
"""
if callback not in self.materialflow_callbacks.delivery_finished_cb:
self.materialflow_callbacks.delivery_finished_cb.append(callback) | PypiClean |
# energycap/sdk/models/meter_calendarized_use_vs_weather_response_py3.py
from msrest.serialization import Model
class MeterCalendarizedUseVsWeatherResponse(Model):
    """MeterCalendarizedUseVsWeatherResponse.

    :param year: The year
    :type year: int
    :param is_heating_weather_sensitive: Is heating weather sensitive?
    :type is_heating_weather_sensitive: bool
    :param winter_balance_point: Heating needed below this temperature
    :type winter_balance_point: int
    :param heating_base_use_per_day: The total base heating use per day
    :type heating_base_use_per_day: float
    :param heating_weather_factor: The heating weather factor
    :type heating_weather_factor: float
    :param is_cooling_weather_sensitive: Is cooling weather sensitive?
    :type is_cooling_weather_sensitive: bool
    :param summer_balance_point: Cooling needed above this temperature
    :type summer_balance_point: int
    :param cooling_base_use_per_day: The total base cooling use per day
    :type cooling_base_use_per_day: float
    :param cooling_weather_factor: The cooling weather factor
    :type cooling_weather_factor: float
    :param use_unit:
    :type use_unit: ~energycap.sdk.models.UnitChild
    """

    # maps Python attribute names to the wire (JSON) keys and msrest types
    _attribute_map = {
        'year': {'key': 'year', 'type': 'int'},
        'is_heating_weather_sensitive': {'key': 'isHeatingWeatherSensitive', 'type': 'bool'},
        'winter_balance_point': {'key': 'winterBalancePoint', 'type': 'int'},
        'heating_base_use_per_day': {'key': 'heatingBaseUsePerDay', 'type': 'float'},
        'heating_weather_factor': {'key': 'heatingWeatherFactor', 'type': 'float'},
        'is_cooling_weather_sensitive': {'key': 'isCoolingWeatherSensitive', 'type': 'bool'},
        'summer_balance_point': {'key': 'summerBalancePoint', 'type': 'int'},
        'cooling_base_use_per_day': {'key': 'coolingBaseUsePerDay', 'type': 'float'},
        'cooling_weather_factor': {'key': 'coolingWeatherFactor', 'type': 'float'},
        'use_unit': {'key': 'useUnit', 'type': 'UnitChild'},
    }

    def __init__(self, *, year: int=None, is_heating_weather_sensitive: bool=None, winter_balance_point: int=None, heating_base_use_per_day: float=None, heating_weather_factor: float=None, is_cooling_weather_sensitive: bool=None, summer_balance_point: int=None, cooling_base_use_per_day: float=None, cooling_weather_factor: float=None, use_unit=None, **kwargs) -> None:
        super(MeterCalendarizedUseVsWeatherResponse, self).__init__(**kwargs)
        self.year = year
        self.is_heating_weather_sensitive = is_heating_weather_sensitive
        self.winter_balance_point = winter_balance_point
        self.heating_base_use_per_day = heating_base_use_per_day
        self.heating_weather_factor = heating_weather_factor
        self.is_cooling_weather_sensitive = is_cooling_weather_sensitive
        self.summer_balance_point = summer_balance_point
        self.cooling_base_use_per_day = cooling_base_use_per_day
        self.cooling_weather_factor = cooling_weather_factor
        self.use_unit = use_unit
# eryn/backends/hdfbackend.py
from __future__ import division, print_function
__all__ = ["HDFBackend", "TempHDFBackend", "does_hdf5_support_longdouble"]
import os
from tempfile import NamedTemporaryFile
import numpy as np
from .. import __version__
from .backend import Backend
try:
import h5py
except ImportError:
h5py = None
def does_hdf5_support_longdouble():
    """Check whether the installed h5py/HDF5 stack preserves ``np.longdouble``.

    Writes a tiny temporary HDF5 file and verifies the dtype survives a
    write/read round trip.

    Returns:
        bool: ``True`` if ``np.longdouble`` round-trips through HDF5;
            ``False`` if it does not or if ``h5py`` is not installed.
    """
    if h5py is None:
        return False

    # delete=False so h5py can reopen the file by name (required on Windows,
    # where an open NamedTemporaryFile cannot be opened a second time)
    with NamedTemporaryFile(
        prefix="emcee-temporary-hdf5", suffix=".hdf5", delete=False
    ) as f:
        f.close()

    try:
        with h5py.File(f.name, "w") as hf:
            g = hf.create_group("group")
            g.create_dataset("data", data=np.ones(1, dtype=np.longdouble))
            if g["data"].dtype != np.longdouble:
                return False

        with h5py.File(f.name, "r") as hf:
            if hf["group"]["data"].dtype != np.longdouble:
                return False

        return True
    finally:
        # bugfix: delete=False left the temp file behind on every call;
        # remove it ourselves once the checks are done
        os.remove(f.name)
class HDFBackend(Backend):
"""A backend that stores the chain in an HDF5 file using h5py
.. note:: You must install `h5py <http://www.h5py.org/>`_ to use this
backend.
Args:
filename (str): The name of the HDF5 file where the chain will be
saved.
name (str, optional): The name of the group where the chain will
be saved. (default: ``"mcmc"``)
read_only (bool, optional): If ``True``, the backend will throw a
``RuntimeError`` if the file is opened with write access.
(default: ``False``)
dtype (dtype, optional): Dtype to use for data storage. If None,
program uses np.float64. (default: ``None``)
compression (str, optional): Compression type for h5 file. See more information
in the
`h5py documentation <https://docs.h5py.org/en/stable/high/dataset.html#filter-pipeline>`_.
(default: ``None``)
compression_opts (int, optional): Compression level for h5 file. See more information
in the
`h5py documentation <https://docs.h5py.org/en/stable/high/dataset.html#filter-pipeline>`_.
(default: ``None``)
store_missing_leaves (double, optional): Number to store for leaves that are not
used in a specific step. (default: ``np.nan``)
"""
    def __init__(
        self,
        filename,
        name="mcmc",
        read_only=False,
        dtype=None,
        compression=None,
        compression_opts=None,
        store_missing_leaves=np.nan,
    ):
        """Store backend configuration; the file itself is opened lazily."""
        if h5py is None:
            raise ImportError("you must install 'h5py' to use the HDFBackend")
        # store all necessary quantities
        self.filename = filename
        self.name = name
        self.read_only = read_only
        self.compression = compression
        self.compression_opts = compression_opts
        # dtype_set records whether the dtype was given by the user;
        # if not, open() may later infer it from an existing chain
        if dtype is None:
            self.dtype_set = False
            self.dtype = np.float64
        else:
            self.dtype_set = True
            self.dtype = dtype
        self.store_missing_leaves = store_missing_leaves
    @property
    def initialized(self):
        """Check if backend file has been initialized properly."""
        if not os.path.exists(self.filename):
            return False
        try:
            # the backend counts as initialized once its group exists
            with self.open() as f:
                return self.name in f
        # treat an unreadable/corrupt file as not initialized
        except (OSError, IOError):
            return False
def open(self, mode="r"):
"""Opens the h5 file in the proper mode.
Args:
mode (str, optional): Mode to open h5 file.
Returns:
H5 file object: Opened file.
Raises:
RuntimeError: If backend is opened for writing when it is read-only.
"""
if self.read_only and mode != "r":
raise RuntimeError(
"The backend has been loaded in read-only "
"mode. Set `read_only = False` to make "
"changes."
)
# open the file
f = h5py.File(self.filename, mode)
# get the data type and store it if it is not previously set
if not self.dtype_set and self.name in f:
# get the group from the file
g = f[self.name]
if "chain" in g:
# get the model names in chain
keys = list(g["chain"])
# they all have the same dtype so use the first one
try:
self.dtype = g["chain"][keys[0]].dtype
# we now have it
self.dtype_set = True
# catch error if the chain has not been initialized yet
except IndexError:
pass
return f
def reset(
self,
nwalkers,
ndims,
nleaves_max=1,
ntemps=1,
branch_names=None,
nbranches=1,
rj=False,
moves=None,
**info,
):
"""Clear the state of the chain and empty the backend
Args:
nwalkers (int): The size of the ensemble
ndims (int, list of ints, or dict): The number of dimensions for each branch. If
``dict``, keys should be the branch names and values the associated dimensionality.
nleaves_max (int, list of ints, or dict, optional): Maximum allowable leaf count for each branch.
It should have the same length as the number of branches.
If ``dict``, keys should be the branch names and values the associated maximal leaf value.
(default: ``1``)
ntemps (int, optional): Number of rungs in the temperature ladder.
(default: ``1``)
branch_names (str or list of str, optional): Names of the branches used. If not given,
branches will be names ``model_0``, ..., ``model_n`` for ``n`` branches.
(default: ``None``)
nbranches (int, optional): Number of branches. This is only used if ``branch_names is None``.
(default: ``1``)
rj (bool, optional): If True, reversible-jump techniques are used.
(default: ``False``)
moves (list, optional): List of all of the move classes input into the sampler.
(default: ``None``)
**info (dict, optional): Any other key-value pairs to be added
as attributes to the backend. These are also added to the HDF5 file.
"""
# open file in append mode
with self.open("a") as f:
# we are resetting so if self.name in the file we need to delete it
if self.name in f:
del f[self.name]
# turn things into lists/dicts if needed
if branch_names is not None:
if isinstance(branch_names, str):
branch_names = [branch_names]
elif not isinstance(branch_names, list):
raise ValueError("branch_names must be string or list of strings.")
else:
branch_names = ["model_{}".format(i) for i in range(nbranches)]
nbranches = len(branch_names)
if isinstance(ndims, int):
assert len(branch_names) == 1
ndims = {branch_names[0]: ndims}
elif isinstance(ndims, list) or isinstance(ndims, np.ndarray):
assert len(branch_names) == len(ndims)
ndims = {bn: nd for bn, nd in zip(branch_names, ndims)}
elif isinstance(ndims, dict):
assert len(list(ndims.keys())) == len(branch_names)
for key in ndims:
if key not in branch_names:
raise ValueError(f"{key} is in ndims but does not appear in branch_names: {branch_names}.")
else:
raise ValueError("ndims is to be a scalar int, list or dict.")
if isinstance(nleaves_max, int):
assert len(branch_names) == 1
nleaves_max = {branch_names[0]: nleaves_max}
elif isinstance(nleaves_max, list) or isinstance(nleaves_max, np.ndarray):
assert len(branch_names) == len(nleaves_max)
nleaves_max = {bn: nl for bn, nl in zip(branch_names, nleaves_max)}
elif isinstance(nleaves_max, dict):
assert len(list(nleaves_max.keys())) == len(branch_names)
for key in nleaves_max:
if key not in branch_names:
raise ValueError(f"{key} is in nleaves_max but does not appear in branch_names: {branch_names}.")
else:
raise ValueError("nleaves_max is to be a scalar int, list, or dict.")
# store all the info needed in memory and in the file
g = f.create_group(self.name)
g.attrs["version"] = __version__
g.attrs["nbranches"] = len(branch_names)
g.attrs["branch_names"] = branch_names
g.attrs["ntemps"] = ntemps
g.attrs["nwalkers"] = nwalkers
g.attrs["has_blobs"] = False
g.attrs["rj"] = rj
g.attrs["iteration"] = 0
# create info group
g.create_group("info")
# load info into class and into file
for key, value in info.items():
setattr(self, key, value)
g["info"].attrs[key] = value
# store nleaves max and ndims dicts
g.create_group("ndims")
for key, value in ndims.items():
g["ndims"].attrs[key] = value
g.create_group("nleaves_max")
for key, value in nleaves_max.items():
g["nleaves_max"].attrs[key] = value
# prepare all the data sets
g.create_dataset(
"accepted",
data=np.zeros((ntemps, nwalkers)),
compression=self.compression,
compression_opts=self.compression_opts,
)
g.create_dataset(
"swaps_accepted",
data=np.zeros((ntemps - 1,)),
compression=self.compression,
compression_opts=self.compression_opts,
)
if self.rj:
g.create_dataset(
"rj_accepted",
data=np.zeros((ntemps, nwalkers)),
compression=self.compression,
compression_opts=self.compression_opts,
)
g.create_dataset(
"log_like",
(0, ntemps, nwalkers),
maxshape=(None, ntemps, nwalkers),
dtype=self.dtype,
compression=self.compression,
compression_opts=self.compression_opts,
)
g.create_dataset(
"log_prior",
(0, ntemps, nwalkers),
maxshape=(None, ntemps, nwalkers),
dtype=self.dtype,
compression=self.compression,
compression_opts=self.compression_opts,
)
g.create_dataset(
"betas",
(0, ntemps),
maxshape=(None, ntemps),
dtype=self.dtype,
compression=self.compression,
compression_opts=self.compression_opts,
)
# setup data sets for branch-specific items
chain = g.create_group("chain")
inds = g.create_group("inds")
for name in branch_names:
nleaves = self.nleaves_max[name]
ndim = self.ndims[name]
chain.create_dataset(
name,
(0, ntemps, nwalkers, nleaves, ndim),
maxshape=(None, ntemps, nwalkers, nleaves, ndim),
dtype=self.dtype,
compression=self.compression,
compression_opts=self.compression_opts,
)
inds.create_dataset(
name,
(0, ntemps, nwalkers, nleaves),
maxshape=(None, ntemps, nwalkers, nleaves),
dtype=bool,
compression=self.compression,
compression_opts=self.compression_opts,
)
# store move specific information
if moves is not None:
move_group = g.create_group("moves")
# setup info and keys
for full_move_name in moves:
single_move = move_group.create_group(full_move_name)
# prepare information dictionary
single_move.create_dataset(
"acceptance_fraction",
(ntemps, nwalkers),
maxshape=(ntemps, nwalkers),
dtype=self.dtype,
compression=self.compression,
compression_opts=self.compression_opts,
)
else:
self.move_info = None
self.blobs = None
@property
def nwalkers(self):
"""Get nwalkers from h5 file."""
with self.open() as f:
return f[self.name].attrs["nwalkers"]
@property
def ntemps(self):
"""Get ntemps from h5 file."""
with self.open() as f:
return f[self.name].attrs["ntemps"]
@property
def rj(self):
"""Get rj from h5 file."""
with self.open() as f:
return f[self.name].attrs["rj"]
@property
def nleaves_max(self):
"""Get nleaves_max from h5 file."""
with self.open() as f:
return {key: f[self.name]["nleaves_max"].attrs[key] for key in f[self.name]["nleaves_max"].attrs}
@property
def ndims(self):
"""Get ndims from h5 file."""
with self.open() as f:
return {key: f[self.name]["ndims"].attrs[key] for key in f[self.name]["ndims"].attrs}
@property
def move_keys(self):
"""Get move_keys from h5 file."""
with self.open() as f:
return list(f[self.name]["moves"])
@property
def branch_names(self):
"""Get branch names from h5 file."""
with self.open() as f:
return f[self.name].attrs["branch_names"]
@property
def nbranches(self):
"""Get number of branches from h5 file."""
with self.open() as f:
return f[self.name].attrs["nbranches"]
@property
def reset_args(self):
"""Get reset_args from h5 file."""
return [self.nwalkers, self.ndims]
@property
def reset_kwargs(self):
"""Get reset_kwargs from h5 file."""
return dict(
nleaves_max=self.nleaves_max,
ntemps=self.ntemps,
branch_names=self.branch_names,
rj=self.rj,
moves=self.moves,
)
@property
def reset_kwargs(self):
"""Get reset_kwargs from h5 file."""
with self.open() as f:
return f[self.name].attrs["reset_kwargs"]
def has_blobs(self):
"""Returns ``True`` if the model includes blobs"""
with self.open() as f:
return f[self.name].attrs["has_blobs"]
def get_value(self, name, thin=1, discard=0, slice_vals=None):
"""Returns a requested value to user.
This function helps to streamline the backend for both
basic and hdf backend.
Args:
name (str): Name of value requested.
thin (int, optional): Take only every ``thin`` steps from the
chain. (default: ``1``)
discard (int, optional): Discard the first ``discard`` steps in
the chain as burn-in. (default: ``0``)
slice_vals (indexing np.ndarray or slice, optional): If provided, slice the array directly
from the HDF5 file with slice = ``slice_vals``. ``thin`` and ``discard`` will be
ignored if slice_vals is not ``None``. This is particularly useful if files are
very large and the user only wants a small subset of the overall array.
(default: ``None``)
Returns:
dict or np.ndarray: Values requested.
"""
# check if initialized
if not self.initialized:
raise AttributeError(
"You must run the sampler with "
"'store == True' before accessing the "
"results"
)
if slice_vals is None:
slice_vals = slice(discard + thin - 1, self.iteration, thin)
# open the file wrapped in a "with" statement
with self.open() as f:
# get the group that everything is stored in
g = f[self.name]
iteration = g.attrs["iteration"]
if iteration <= 0:
raise AttributeError(
"You must run the sampler with "
"'store == True' before accessing the "
"results"
)
if name == "blobs" and not g.attrs["has_blobs"]:
return None
if name == "chain":
v_all = {key: g["chain"][key][slice_vals] for key in g["chain"]}
return v_all
if name == "inds":
v_all = {key: g["inds"][key][slice_vals] for key in g["inds"]}
return v_all
v = g[name][slice_vals]
return v
def get_move_info(self):
"""Get move information.
Returns:
dict: Keys are move names and values are dictionaries with information on the moves.
"""
# setup output dictionary
move_info_out = {}
with self.open() as f:
g = f[self.name]
# iterate through everything and produce a dictionary
for move_name in g["moves"]:
move_info_out[move_name] = {}
for info_name in g["moves"][move_name]:
move_info_out[move_name][info_name] = g["moves"][move_name][
info_name
][:]
return move_info_out
@property
def shape(self):
"""The dimensions of the ensemble
Returns:
dict: Shape of samples
Keys are ``branch_names`` and values are tuples with
shapes of individual branches: (ntemps, nwalkers, nleaves_max, ndim).
"""
# open file wrapped in with
with self.open() as f:
g = f[self.name]
return {
key: (g.attrs["ntemps"], g.attrs["nwalkers"], self.nleaves_max[key], self.ndims[key])
for key in g.attrs["branch_names"]
}
@property
def iteration(self):
"""Number of iterations stored in the hdf backend so far."""
with self.open() as f:
return f[self.name].attrs["iteration"]
@property
def accepted(self):
"""Number of accepted moves per walker."""
with self.open() as f:
return f[self.name]["accepted"][...]
@property
def rj_accepted(self):
"""Number of accepted rj moves per walker."""
with self.open() as f:
return f[self.name]["rj_accepted"][...]
@property
def swaps_accepted(self):
"""Number of accepted swaps."""
with self.open() as f:
return f[self.name]["swaps_accepted"][...]
@property
def random_state(self):
"""Get the random state"""
with self.open() as f:
elements = [
v
for k, v in sorted(f[self.name].attrs.items())
if k.startswith("random_state_")
]
return elements if len(elements) else None
def grow(self, ngrow, blobs):
"""Expand the storage space by some number of samples
Args:
ngrow (int): The number of steps to grow the chain.
blobs (None or np.ndarray): The current array of blobs. This is used to compute the
dtype for the blobs array.
"""
self._check_blobs(blobs)
# open the file in append mode
with self.open("a") as f:
g = f[self.name]
# resize all the arrays accordingly
ntot = g.attrs["iteration"] + ngrow
for key in g["chain"]:
g["chain"][key].resize(ntot, axis=0)
g["inds"][key].resize(ntot, axis=0)
g["log_like"].resize(ntot, axis=0)
g["log_prior"].resize(ntot, axis=0)
g["betas"].resize(ntot, axis=0)
# deal with blobs
if blobs is not None:
has_blobs = g.attrs["has_blobs"]
# if blobs have not been added yet
if not has_blobs:
nwalkers = g.attrs["nwalkers"]
ntemps = g.attrs["ntemps"]
g.create_dataset(
"blobs",
(ntot, ntemps, nwalkers, blobs.shape[-1]),
maxshape=(None, ntemps, nwalkers, blobs.shape[-1]),
dtype=self.dtype,
compression=self.compression,
compression_opts=self.compression_opts,
)
else:
# resize the blobs if they have been there
g["blobs"].resize(ntot, axis=0)
if g["blobs"].shape[1:] != blobs.shape:
raise ValueError(
"Existing blobs have shape {} but new blobs "
"requested with shape {}".format(
g["blobs"].shape[1:], blobs.shape
)
)
g.attrs["has_blobs"] = True
def save_step(
self,
state,
accepted,
rj_accepted=None,
swaps_accepted=None,
moves_accepted_fraction=None,
):
"""Save a step to the backend
Args:
state (State): The :class:`State` of the ensemble.
accepted (ndarray): An array of boolean flags indicating whether
or not the proposal for each walker was accepted.
rj_accepted (ndarray, optional): An array of the number of accepted steps
for the reversible jump proposal for each walker.
If :code:`self.rj` is True, then rj_accepted must be an array with
:code:`rj_accepted.shape == accepted.shape`. If :code:`self.rj`
is False, then rj_accepted must be None, which is the default.
swaps_accepted (ndarray, optional): 1D array with number of swaps accepted
for the in-model step. (default: ``None``)
moves_accepted_fraction (dict, optional): Dict of acceptance fraction arrays for all of the
moves in the sampler. This dict must have the same keys as ``self.move_keys``.
(default: ``None``)
"""
# open for appending in with statement
with self.open("a") as f:
g = f[self.name]
# get the iteration left off on
iteration = g.attrs["iteration"]
# make sure the backend has all the information needed to store everything
for key in [
"rj",
"ntemps",
"nwalkers",
"nbranches",
"branch_names",
"ndims",
]:
if not hasattr(self, key):
setattr(self, key, g.attrs[key])
# check the inputs are okay
self._check(
state, accepted, rj_accepted=rj_accepted, swaps_accepted=swaps_accepted,
)
# branch-specific
for name, model in state.branches.items():
g["inds"][name][iteration] = model.inds
# use self.store_missing_leaves to set value for missing leaves
# state retains old coordinates
coords_in = model.coords * model.inds[:, :, :, None]
inds_all = np.repeat(model.inds, coords_in.shape[-1], axis=-1).reshape(
model.inds.shape + (coords_in.shape[-1],)
)
coords_in[~inds_all] = self.store_missing_leaves
g["chain"][name][self.iteration] = coords_in
# store everything else in the file
g["log_like"][iteration, :] = state.log_like
g["log_prior"][iteration, :] = state.log_prior
if state.blobs is not None:
g["blobs"][iteration, :] = state.blobs
if state.betas is not None:
g["betas"][self.iteration, :] = state.betas
g["accepted"][:] += accepted
if swaps_accepted is not None:
g["swaps_accepted"][:] += swaps_accepted
if self.rj:
g["rj_accepted"][:] += rj_accepted
for i, v in enumerate(state.random_state):
g.attrs["random_state_{0}".format(i)] = v
g.attrs["iteration"] = iteration + 1
# moves
if moves_accepted_fraction is not None:
if "moves" not in g:
raise ValueError(
"""moves_accepted_fraction was passed, but moves_info was not initialized. Use the moves kwarg
in the reset function."""
)
# update acceptance fractions
for move_key in self.move_keys:
g["moves"][move_key]["acceptance_fraction"][
:
] = moves_accepted_fraction[move_key]
class TempHDFBackend(object):
    """Context manager yielding an :class:`HDFBackend` on a temporary file.

    Used to check that HDF5 storage is working and available; the backing
    file is deleted again on exit.
    """

    def __init__(self, dtype=None, compression=None, compression_opts=None):
        self.dtype = dtype
        self.filename = None
        self.compression = compression
        self.compression_opts = compression_opts

    def __enter__(self):
        # create the file, release the OS handle, and keep only its name
        tmp = NamedTemporaryFile(
            prefix="emcee-temporary-hdf5", suffix=".hdf5", delete=False
        )
        tmp.close()
        self.filename = tmp.name
        backend = HDFBackend(
            tmp.name,
            "test",
            dtype=self.dtype,
            compression=self.compression,
            compression_opts=self.compression_opts,
        )
        return backend

    def __exit__(self, exception_type, exception_value, traceback):
        # always clean up the temporary file, even on error
        os.remove(self.filename)
/Evmlab-0.3.0.0.1-py3-none-any.whl/evmlab/contract.py | import re
from evmlab.opcodes import parseCode
"""
Solidity source code mappings, as
documented [here](http://solidity.readthedocs.io/en/develop/miscellaneous.html#source-mappings)
"""
def update(original, changes):
    """Overlay *changes* onto *original*, element by element.

    For each position in ``original``, the value from ``changes`` is taken
    when one exists at that index and is non-empty; otherwise the original
    value is kept. Returns a new list; neither input is modified.
    """
    merged = []
    for idx, value in enumerate(original):
        if idx < len(changes) and len(changes[idx]) > 0:
            value = changes[idx]
        merged.append(value)
    return merged
def parseSourceMap(maptext):
    """Parse a Solidity source-map string into a list of 4-field entries.

    Entries are ';'-separated; fields within an entry are ':'-separated.
    Empty fields (and missing trailing fields) repeat the value from the
    previous entry, per the Solidity source-mapping compression scheme.
    Returns ``[]`` when *maptext* is ``None``.
    """
    mapping = []
    if maptext is None:
        return mapping
    current = ["", "", "", ""]
    for entry in maptext.split(";"):
        fields = entry.split(":")
        # carry forward previous values for empty/absent fields
        current = [
            fields[i] if i < len(fields) and len(fields[i]) > 0 else prev
            for i, prev in enumerate(current)
        ]
        mapping.append(current)
    return mapping
class Contract():
    """Maps EVM program counters back to Solidity source snippets using
    solc-style source maps (``srcmap`` / ``srcmap-runtime``)."""
    # Class-level defaults; instances overwrite these in _loadContract.
    # _create selects creation-time (bin/ins/mapping) vs runtime
    # (binRuntime/insRuntime/mappingRuntime) views of the contract.
    _create = False
    bin = None
    ins = None
    binRuntime = None
    insRuntime = None
    lastSource = None
    name = ""
    def __init__(self, sources, contract=None, name=""):
        # sources: list of source-file texts, indexed by the srcmap file index
        self.sources = sources or []
        self._contractTexts = {}
        self._sourceCache = {}
        self.name = name
        self._loadContract(contract)
    @property
    def create(self):
        # True -> use creation bytecode mappings; False -> runtime mappings
        return self._create
    @create.setter
    def create(self, val):
        self._create = val
        # texts depend on which mapping is active, so rebuild them
        self._loadContractTexts()
    @property
    def contractTexts(self):
        # lazily populated map of srcmap file index -> source text
        if len(self._contractTexts.keys()) == 0:
            self._loadContractTexts()
        return self._contractTexts
    def isInitialized(self):
        # true once some bytecode (creation or runtime) has been loaded
        return self.bin is not None or self.binRuntime is not None
    def getSourceCode(self, pc):
        """Return ``(text, [start, length])`` for the source range mapped to
        program counter *pc*, falling back to the last successful lookup
        when *pc* has no usable mapping."""
        try:
            [s, l, f, j] = self._getInstructionMapping(pc)
            f = int(f)
            c = self.contractTexts[f]
        except KeyError:
            if self.lastSource:
                return self._sourceCache[self.lastSource][0], self._sourceCache[self.lastSource][1]
            return "Missing code", (0, 0)
        s = int(s)
        l = int(l)
        # cache key covers the full mapping tuple for this instruction
        h = hash((s, l, f, j))
        if h in self._sourceCache:
            self.lastSource = h
            return self._sourceCache[h][0], self._sourceCache[h][1]
        # contract is missing, return the last valid ins mapping
        # NOTE(review): if no earlier pc has a mapping with f > 0, this loop
        # decrements pc past 0 without terminating — confirm inputs guarantee
        # a valid earlier mapping.
        if f < 0:
            while True:
                pc -= 1
                try:
                    [s, l, f, j] = self._getInstructionMapping(pc)
                except KeyError:
                    f = -1
                f = int(f)
                if f > 0:
                    c = self.contractTexts[f]
                    s = int(s)
                    l = int(l)
                    break
        # see if text contains multiple contracts
        # NOTE(review): pattern lacks re.MULTILINE, so '^' only matches the
        # start of the whole text — confirm this is intended.
        contract_start_indices = [m.start(0)
                                  for m in re.finditer('^ *contract ', c)]
        # for multi contract files, get the start of the contract for the current instruction
        if len(contract_start_indices) > 1:
            contract_start = 0
            contract_end = -1
            # pick the contract whose start offset encloses byte offset s
            for i in contract_start_indices:
                if i == s:
                    contract_start = s
                    break
                elif i > s:
                    # get the previous index
                    ci = contract_start_indices.index(i) - 1
                    if ci >= 0:
                        contract_start = contract_start_indices[ci]
                    break
                elif s > i and i == contract_start_indices[-1]:
                    contract_start = contract_start_indices[-1]
            # scan forward from the contract's opening '{' to its matching '}'
            pos = contract_start + c[contract_start:].find('{')
            openBr = 0
            while pos < len(c):
                if c[pos] == '{':
                    openBr += 1
                elif c[pos] == '}':
                    openBr -= 1
                    if openBr == 0:
                        contract_end = pos + 1
                        break
                pos += 1
            # return only the contract we're interested in
            # we need to update the bytes start & end pos to reflect the truncated text we are returning
            res = (c[contract_start:contract_end], [s - contract_start, l])
            self._sourceCache[h] = res
            self.lastSource = h
            return res[0], res[1]
        else:
            self._sourceCache[h] = (c, [s, l])
            self.lastSource = h
            return c, [s, l]
    def _getInstructionMapping(self, pc):
        """
        :param pc: programCounter to fetch mapping for
        :return: [s, l, f, j] where:
            s is the byte-offset to the start of the range in the source file,
            l is the length of the source range in bytes and
            f is the source index mentioned above.
            j can be either i, o or -
                i signifying whether a jump instruction goes into a function,
                o returns from a function or
                - is a regular jump as part of e.g. a loop.
        """
        i = self._getInstructionMapping.__defaults__ if False else self._getMappingIndex(pc)
        mapping = self.mapping if self.create else self.mappingRuntime
        return mapping[i]
    def _getMappingIndex(self, pc):
        # map pc -> positional index into the srcmap entry list; raises
        # KeyError when pc is not an instruction offset
        ins = self.ins if self.create else self.insRuntime
        pcs = list(ins.keys())
        if pc in pcs:
            return pcs.index(pc)
        raise KeyError
    def _loadContract(self, contract):
        """Populate bytecode, disassembly and source maps from a solc-style
        contract JSON dict (keys: bin, bin-runtime, srcmap, srcmap-runtime)."""
        if not contract:
            return
        def load(key):
            # tolerate missing keys: warn and return None
            try:
                return contract[key]
            except KeyError:
                print("Contract JSON missing key: %s" % key)
            return None
        bytecode = load('bin-runtime')
        if bytecode:
            self.binRuntime = bytecode
            self.insRuntime = parseCode(bytecode)
        bytecode = load('bin')
        if bytecode:
            self.bin = bytecode
            self.ins = parseCode(bytecode)
        self.mappingRuntime = parseSourceMap(load('srcmap-runtime'))
        self.mapping = parseSourceMap(load('srcmap'))
    def _loadContractTexts(self):
        # rebuild the index -> source-text map for the active mapping;
        # also invalidates the source snippet cache
        self._sourceCache = {}
        mapping = self.mapping if self.create else self.mappingRuntime
        contract_indexes = set()
        for [s, l, f, j] in mapping:
            f = int(f)
            # NOTE(review): f > 0 skips source index 0, so the first source
            # file's text is never loaded — confirm this is intentional.
            if (f > 0):
                contract_indexes.add(f)
        for i in contract_indexes:
            self._contractTexts[i] = self.sources[i]
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = {name: '3%d' % index for index, name in enumerate(color_names)}
background = {name: '4%d' % index for index, name in enumerate(color_names)}

RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}


def colorize(text='', opts=(), **kwargs):
    """
    Return your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Return the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    # special case: colorize('', opts=('reset',)) yields the bare RESET code
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET

    # collect color codes in keyword order, then display-option codes
    codes = []
    for key, value in kwargs.items():
        if key == 'fg':
            codes.append(foreground[value])
        elif key == 'bg':
            codes.append(background[value])
    codes.extend(opt_dict[o] for o in opts if o in opt_dict)

    body = text or ''
    if 'noreset' not in opts:
        body = '%s\x1b[%sm' % (body, RESET)
    return '\x1b[%sm%s' % (';'.join(codes), body)
def make_style(opts=(), **kwargs):
    """
    Return a function with default parameters for colorize()

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print(bold_red('hello'))
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        # close over the fixed options/colors; only the text varies per call
        return colorize(text, opts, **kwargs)
    return style_func
# Named palettes selectable via the DJANGO_COLORS environment variable.
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
# Maps palette name -> {role: colorize() kwargs}. An empty dict for a role
# means "apply no styling"; the 'nocolor' palette therefore disables all
# coloring while still listing every valid role name.
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'SUCCESS': {},
        'WARNING': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
        'MIGRATE_HEADING': {},
        'MIGRATE_LABEL': {},
    },
    DARK_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'yellow'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green'},
        'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'yellow'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
    },
    LIGHT_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'blue'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
        'HTTP_NOT_MODIFIED': {'fg': 'green'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'red'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
    }
}
# Palette used by parse_color_setting() when no configuration is supplied.
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.

    Valid roles:
        'error', 'success', 'warning', 'notice', 'sql_field', 'sql_coltype',
        'sql_keyword', 'sql_table', 'http_info', 'http_success',
        'http_redirect', 'http_not_modified', 'http_bad_request',
        'http_not_found', 'http_server_error', 'migrate_heading',
        'migrate_label'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal', 'noreset'
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]

    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    # start from the all-roles 'nocolor' palette so every role key exists
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}

            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            role, instructions = part.split('=')
            role = role.upper()

            styles = instructions.split(',')
            styles.reverse()

            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]

            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict)
            if opts:
                definition['opts'] = opts

            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition

    # If there are no colors specified, return the empty palette.
    # (None is the sentinel for "colors explicitly disabled".)
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
/Moments-2.0.tar.gz/Moments-2.0/moments/export.py | import sys, subprocess, os, re
from moments.journal import Journal
from moments.tag import Tags
from moments.path import Path, load_journal
from moments.timestamp import Timestamp
def merge_many(source, destination, add_tags=[]):
    """
    use load journal to load the source directory
    then save the resulting journal to the temporary destination
    """
    # NOTE(review): mutable default argument add_tags=[] is shared across
    # calls — confirm callers never mutate it.
    j = load_journal(source, add_tags)
    # NOTE(review): 'reverse-chronlological' looks misspelled — confirm
    # Journal.sort_entries expects this exact string.
    j.sort_entries('reverse-chronlological')
    j.to_file(destination)
def merge_logs(f1, f2, add_tags=[], ofile="", verbose=False):
    """
    this is a common operation involving two log files or two Journal objects
    it is fairly simple with the Journal object,
    but this handles all of the calls
    for creating those Journal objects from files
    add tags will only apply to the first file
    it is being merged into the second
    """
    #shouldn't need osbrowser here... we know the absolute paths via shellx
    #n1 = osbrowser.meta.make_node(f1)
    result = ''
    # load f1 (with tags) and then f2 into the same journal to merge them
    j = Journal()
    j.load(f1, add_tags)
    len1 = len(j.entries())
    j2 = Journal()
    j2.load(f2)
    len2 = len(j2.entries())
    j.load(f2)
    len3 = len(j.entries())
    result += "merge resulted in %s entries from %s and %s\n" % (len3, len1, len2)
    f2_obj = Path(f2)
    # default output name: merge-<timestamp>-<f2 name>.txt next to f2
    if not ofile:
        now = Timestamp(now=True)
        temp_name = "merge-%s-%s.txt" % (now.compact(), f2_obj.name)
        ofile = os.path.join(str(f2_obj.parent()), temp_name)
    result += "SAVING as: %s\n" % ofile
    j.save(ofile)
    if verbose:
        # Python 2 print statement — this module targets Python 2
        print result
    #if there are dupes the two totals may not add up to the new total
    # (does not necessarily mean a conflict)
    #and now journal can handle multiple entries with the same timestamp,
    # (so it will not discard subsequent entries with the same timestamp)
    #so it may be ok to always accept a merge
    # (as long as both files consisted of actual moment entries)
    #
    #if (len1+len2 == len3):
    #    return (ofile, 1)
    #else:
    #    result += "WARNING: dupes/conflicts encountered<br>"
    #    return (ofile, 0)
    return (ofile, 1)
def export_logs(source, destination, add_tags=[], recurse=True):
    """
    very similar to diff-directories functionality
    can't think of a good way to reuse at this point...
    going ahead with repeat
    """
    # Returns a list of merge outputs that could not be applied cleanly.
    conflicts = []
    #src = make_node(source, relative=False)
    src = Path(source).load()
    src.scan_directory()
    #dst = make_node(destination, relative=False)
    dst = Path(destination).load()
    dst.scan_directory()
    # copy of destination listing; entries also present in source are
    # removed as they are processed, leaving destination-only files
    dstlistdircp = dst.listdir[:]
    print "items found: %s" % src.listdir
    for i in src.listdir:
        #items to ignore (and make sure it's a text file)
        if i not in [ "ignore_me.txt", ".hg", "README.txt" ] and re.search("\.txt", i):
            #print datetime.now()
            n1path = Path(os.path.join(source, i)) # == i ???
            n2path = Path(os.path.join(destination, i))
            print "exporting: %s" % i
            if i in dstlistdircp:
                #they both have an item with the same name
                dstlistdircp.remove(i)
                #n1 = make_node(n1path)
                #n2 = make_node(n2path)
                n1 = n1path.load()
                n2 = n2path.load()
                if n1path.type() == "Directory":
                    if recurse:
                        conflicts.extend(export_logs(n1, n2, add_tags, recurse))
                    else:
                        print "Not recursing into directory: %s" % n1
                else:
                    #must have 2 files... lets merge them
                    (merged, result) = merge_logs(n1path, n2path, add_tags)
                    if not result:
                        #must have been a problem in result (dupes)
                        #save for later
                        conflicts.append(merged)
                    else:
                        #merge came back with correct number of new entries
                        #lets remove the originals
                        #and rename the new one
                        #os.remove(str(n2path))
                        n2path.remove()
                        os.rename(merged, str(n2path))
                        #os.remove(n1path)
                        n1path.remove()
            else:
                # the file is in the source only
                # lets add tags (if any) then move the file to dst
                j = Journal()
                j.load(n1path, add_tags)
                j.save(n1path)
                if os.name == "nt":
                    #move might not work as in the case of pictures
                    os.rename(str(n1path), str(n2path))
                else:
                    #on posix type systems this is more likely to work
                    #between different devices
                    # NOTE(review): shell=True with interpolated paths breaks
                    # on spaces/shell metacharacters in filenames — confirm
                    # inputs are safe or switch to an argument list.
                    mv = subprocess.Popen("mv %s %s" % (n1path, n2path), shell=True, stdout=subprocess.PIPE)
                    mv.wait()
    #if anything is left in dstlistdircp, it must not have been in src
    #in the case of an export, it's already on the destination... fine
    if len(dstlistdircp):
        pass
    return conflicts
def usage():
    """Print command-line usage (Python 2 print statement)."""
    print """
python /c/code/python/scripts/export_logs.py outgoing/ /media/USB/outgoing/ system_name-other_tags
"""
def main():
    """Command-line entry point: export logs from argv[1] to argv[2],
    optionally tagging entries with argv[3]."""
    if len (sys.argv) > 1:
        # NOTE(review): `len(sys.argv) < 2` is always False inside this
        # branch (dead check), and usage() does not exit — argv[1]/argv[2]
        # are still consumed afterwards, so `--help` with no further args
        # raises IndexError. Confirm before relying on --help handling.
        if sys.argv[1] in ['--help','help'] or len(sys.argv) < 2:
            usage()
        d1 = sys.argv[1]
        d2 = sys.argv[2]
        if len(sys.argv) > 3:
            #add_tags = tags_from_string(sys.argv[3])
            add_tags = Tags().from_tag_string(sys.argv[3])
        else:
            add_tags = []
        conflicts = export_logs(d1, d2, add_tags)
        for c in conflicts:
            # Python 2 print statement — this module targets Python 2
            print "Conflict found in: %s" % c
    # for merging
    #    f1 = sys.argv[1]
    #    f2 = sys.argv[2]
    #    merge_logs(f1, f2, verbose=True)
# script entry point
if __name__ == '__main__':
    main()
/CmlArabicReaderThird-0.1-py3-none-any.whl/ThirdcamelArabicReader/camel_tools/utils/charmap.py |
# MIT License
#
# Copyright 2018-2021 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Contains the CharMapper class (for mapping characters in a Unicode string to
other strings) and custom exceptions raised by CharMapper.
"""
from __future__ import absolute_import
from collections import deque
from collections.abc import Mapping
import os
import json
from .stringutils import isunicode
class InvalidCharMapKeyError(ValueError):
    """Raised when a charmap used to initialize :obj:`CharMapper` contains
    a key that is not a single character or a valid character range.
    """

    def __init__(self, key, message):
        super(InvalidCharMapKeyError, self).__init__(message)
        # Keep the offending key and the message around for inspection.
        self.key = key
        self.message = message

    def __repr__(self):
        return 'InvalidCharMapKeyError(%r, %r)' % (self.key, self.message)

    def __str__(self):
        return self.message
class BuiltinCharMapNotFoundError(ValueError):
    """Raised when the map name passed to :func:`CharMapper.builtin_mapper`
    is not one of the built-in maps.
    """

    def __init__(self, map_name, message):
        super(BuiltinCharMapNotFoundError, self).__init__(message)
        # Record which map was requested, for callers that want to report it.
        self.map_name = map_name
        self.message = message

    def __repr__(self):
        return 'BuiltinCharMapNotFoundError(%r, %r)' % (
            self.map_name, self.message)

    def __str__(self):
        return self.message
class CharMapper(object):
    """A class for mapping characters in a Unicode string to other strings.

    Args:
        charmap (:obj:`dict`): A dictionary or any other dictionary-like
            object (implementing collections.abc.Mapping) mapping characters
            or ranges of characters to a string. Keys in the dictionary
            should be Unicode strings of length 1 or 3. Strings of length 1
            indicate a single character to be mapped, while strings of
            length 3 indicate a range. Range strings should have the format
            'a-b' where 'a' is the starting character in the range and 'b'
            is the last character in the range (inclusive). 'b' should have
            a strictly larger ordinal number than 'a'. Dictionary values
            should be either strings or `None`, where `None` indicates that
            characters are mapped to themselves. Use an empty string to
            indicate deletion.
        default (:obj:`str`, optional): The default value to map characters
            not in **charmap** to. `None` indicates that characters map to
            themselves. Defaults to `None`.

    Raises:
        :obj:`InvalidCharMapKeyError`: If a key in charmap is not a Unicode
            string containing either a single character or a valid
            character range.
        :obj:`TypeError`: If default or a value for a key in charmap is
            neither `None` nor a Unicode string, or if **charmap** is not a
            dictionary-like object.
    """

    # Names of the transliteration maps shipped with this package, loaded
    # from '<name>_map.json' files in the 'charmaps' directory.
    BUILTIN_CHARMAPS = frozenset((
        'ar2bw',
        'ar2safebw',
        'ar2xmlbw',
        'ar2hsb',
        'bw2ar',
        'bw2safebw',
        'bw2xmlbw',
        'bw2hsb',
        'safebw2ar',
        'safebw2bw',
        'safebw2xmlbw',
        'safebw2hsb',
        'xmlbw2ar',
        'xmlbw2bw',
        'xmlbw2safebw',
        'xmlbw2hsb',
        'hsb2ar',
        'hsb2bw',
        'hsb2safebw',
        'hsb2xmlbw',
        'arclean',
    ))

    @staticmethod
    def _expand_char_map(charmap):
        """Creates a new dictionary from charmap where character ranges are
        expanded and given their own dictionary entry.

        Args:
            charmap (:obj:`dict`): The character map to be expanded.

        Returns:
            :obj:`dict`: A copy of **charmap** where every range key has
            been replaced by individual single-character keys.

        Raises:
            :obj:`InvalidCharMapKeyError`: If a key in **charmap** is not a
                Unicode string containing either a single character or a valid
                character range.
            :obj:`TypeError`: If a value for a key in **charmap** is neither
                `None` nor a Unicode string.
        """
        # TODO: Implement a space efficient character map data structure
        new_map = {}
        for key in charmap.keys():
            # Check that key is a string
            if not isunicode(key):
                raise TypeError('Expected string as key. '
                                'Got {} instead.'.format(type(key)))
            # If string is one character long we can directly add it to the map
            if len(key) == 1:
                if charmap[key] is not None and not isunicode(charmap[key]):
                    raise TypeError(
                        ('Expected a Unicode string or None value for key '
                         'value, got {} instead.').format(type(charmap[key])))
                else:
                    new_map[key] = charmap[key]
            # We check if it's a range with the following rules:
            #     a) The string is 3 character long with a dash '-' in the
            #        middle.
            #     b) The first character must have a strictly smaller ordinal
            #        than the last character.
            elif len(key) == 3 and key[1] == '-':
                if ord(key[0]) >= ord(key[2]):
                    raise InvalidCharMapKeyError(key, '')
                else:
                    if (charmap[key] is not None
                            and not isunicode(charmap[key])):
                        raise TypeError(
                            ('Expected a Unicode string or None value for key '
                             'value, got {} instead.').format(
                                 type(charmap[key]))
                        )
                    # Expand the inclusive range into individual entries.
                    for char in range(ord(key[0]), ord(key[2]) + 1):
                        new_map[chr(char)] = charmap[key]
            # Otherwise, we have an invalid map key
            else:
                raise InvalidCharMapKeyError(
                    key, 'Invalid character or character range')
        return new_map

    def __init__(self, charmap, default=None):
        """Class constructor.
        """
        if isinstance(charmap, Mapping):
            self._charmap = self._expand_char_map(charmap)
        else:
            raise TypeError(
                ('Expected a dictionary like object for charmap, got {} '
                 'instead').format(type(charmap)))
        if default is None or isunicode(default):
            self._default = default
        else:
            raise TypeError(
                ('Expected a Unicode string or None value for default, got {} '
                 'instead.').format(type(default)))

    def __call__(self, s):
        """Alias for :func:`CharMapper.map_string`.
        """
        return self.map_string(s)

    @staticmethod
    def mapper_from_json(fpath):
        """Creates a :obj:`CharMapper` instance from a JSON file.

        Args:
            fpath (:obj:`str`): Path to JSON file.

        Returns:
            :obj:`CharMapper`: A new :obj:`CharMapper` instance generated from
            given JSON file.

        Raises:
            :obj:`InvalidCharMapKeyError`: If a key in charmap is not a Unicode
                string containing either a single character or a valid
                character range.
            :obj:`TypeError`: If default or a value for a key in charmap is
                neither `None` nor a Unicode string.
            :obj:`FileNotFoundError`: If file at `fpath` doesn't exist.
            :obj:`JSONDecodeError`: If `fpath` is not a valid JSON file.
        """
        with open(fpath, 'r', encoding='utf-8') as charmap_fp:
            jsonstr = charmap_fp.read()
            json_dict = json.loads(jsonstr)
        # The JSON file carries the map under 'charMap' and an optional
        # 'default' mapping value.
        return CharMapper(
            json_dict.get('charMap', {}),
            default=json_dict.get('default', None)
        )

    @staticmethod
    def builtin_mapper(map_name):
        """Creates a :obj:`CharMapper` instance from built-in mappings.

        Args:
            map_name (:obj:`str`): Name of built-in map.

        Returns:
            :obj:`CharMapper`: A new :obj:`CharMapper` instance of built-in
            map.

        Raises:
            :obj:`BuiltinCharMapNotFoundError`: If `map_name` is not in the
                list of built-in maps.
        """
        if map_name not in CharMapper.BUILTIN_CHARMAPS:
            raise BuiltinCharMapNotFoundError(
                map_name,
                'No built in mapping with name \'{}\' '
                'was found.'.format(map_name))
        try:
            charmaps_dir = os.path.join(os.path.dirname(__file__), 'charmaps')
        # This should never happen unless there is something wrong with the
        # system or the installation.
        except Exception:  # pragma: no coverage
            raise BuiltinCharMapNotFoundError(
                map_name,
                'Could not create mapping with name \'{}\'.'.format(map_name))
        map_path = os.path.join(charmaps_dir, '{}_map.json'.format(map_name))
        return CharMapper.mapper_from_json(map_path)

    def map_string(self, s):
        """Maps each character in a given string to its corresponding value in
        the charmap.

        Args:
            s (:obj:`str`): A Unicode string to be mapped.

        Returns:
            :obj:`str`: A new Unicode string with the charmap applied.

        Raises:
            :obj:`TypeError`: If s is not a Unicode string.
        """
        if not isunicode(s):
            raise TypeError((
                'Expected Unicode string as input, got {} instead.'
            ).format(type(s)))
        buff = deque()
        for char in s:
            # Characters absent from the map fall back to the default;
            # a None mapping means "map to itself".
            transliteration = self._charmap.get(char, self._default)
            if transliteration is None:
                buff.append(char)
            else:
                buff.append(transliteration)
        return ''.join(buff)
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/setuptools/package_index.py | import sys
import os
import re
import shutil
import socket
import base64
import hashlib
import itertools
import warnings
from functools import wraps
from setuptools.extern import six
from setuptools.extern.six.moves import urllib, http_client, configparser, map
import setuptools
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST, EGG_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from fnmatch import translate
from setuptools.py27compat import get_all_headers
from setuptools.py33compat import unescape
from setuptools.wheel import Wheel
__metaclass__ = type  # Python 2 compatibility: make classes new-style
# Matches an "egg=<name>" URL fragment.
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
# Extracts the target of an href attribute from HTML.
HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
# Matches legacy PyPI markup linking a download with its MD5 digest.
PYPI_MD5 = re.compile(
    r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
    r'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\)'
)
# Matches a URL scheme prefix such as "http:" or "svn+ssh:".
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
# Recognized source-distribution archive extensions.
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
    'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15  # seconds
_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
# User-Agent header value sent with index requests.
user_agent = _tmpl.format(py_major=sys.version[:3], setuptools=setuptools)
def parse_requirement_arg(spec):
    """Parse *spec* into a Requirement, raising DistutilsError on failure."""
    try:
        req = Requirement.parse(spec)
    except ValueError:
        raise DistutilsError(
            "Not a URL, existing file, or requirement spec: %r" % (spec,)
        )
    return req
def parse_bdist_wininst(name):
    """Return (base, py_version, platform) parsed from a possible
    bdist_wininst ``.exe`` filename, or (None, None, None) otherwise.
    """
    lowered = name.lower()
    base = py_ver = plat = None
    if lowered.endswith('.exe'):
        # Suffix layouts: ".win32.exe", ".win32-pyX.Y.exe",
        # ".win-amd64.exe", ".win-amd64-pyX.Y.exe"
        if lowered.endswith('.win32.exe'):
            base, plat = name[:-10], 'win32'
        elif lowered.startswith('.win32-py', -16):
            base, py_ver, plat = name[:-16], name[-7:-4], 'win32'
        elif lowered.endswith('.win-amd64.exe'):
            base, plat = name[:-14], 'win-amd64'
        elif lowered.startswith('.win-amd64-py', -20):
            base, py_ver, plat = name[:-20], name[-7:-4], 'win-amd64'
    return base, py_ver, plat
def egg_info_for_url(url):
    """Return the (basename, fragment) pair extracted from a download URL."""
    parts = urllib.parse.urlparse(url)
    fragment = parts.fragment
    base = urllib.parse.unquote(parts.path.split('/')[-1])
    if parts.netloc == 'sourceforge.net' and base == 'download':  # XXX Yuck
        # SourceForge URLs end in "/download"; the real name precedes it.
        base = urllib.parse.unquote(parts.path.split('/')[-2])
    if '#' in base:
        base, fragment = base.split('#', 1)
    return base, fragment
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    for dist in distros_for_location(url, base, metadata):
        yield dist
    if not fragment:
        return
    # An "#egg=name-version" fragment names a checkout distribution.
    match = EGG_FRAGMENT.match(fragment)
    if match:
        for dist in interpret_distro_name(
                url, match.group(1), metadata, precedence=CHECKOUT_DIST):
            yield dist
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename"""
    if basename.endswith('.egg.zip'):
        basename = basename[:-4]  # strip the .zip
    if '-' in basename and basename.endswith('.egg'):
        # only one, unambiguous interpretation
        return [Distribution.from_location(location, basename, metadata)]
    if '-' in basename and basename.endswith('.whl'):
        wheel = Wheel(basename)
        if not wheel.is_compatible():
            return []
        dist = Distribution(
            location=location,
            project_name=wheel.project_name,
            version=wheel.version,
            # Increase priority over eggs.
            precedence=EGG_DIST + 1,
        )
        return [dist]
    if basename.endswith('.exe'):
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # Try source distro extensions (.zip, .tgz, etc.)
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            return interpret_distro_name(
                location, basename[:-len(ext)], metadata
            )
    return []  # no extension matched
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    location = normalize_path(filename)
    basename = os.path.basename(filename)
    return distros_for_location(location, basename, metadata)
def interpret_distro_name(
        location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
        platform=None
):
    """Generate alternative interpretations of a source distro name

    Note: if `location` is a filesystem filename, you should call
    ``pkg_resources.normalize_path()`` on it before passing it to this
    routine!
    """
    # Names like "adns-python-1.1.0" are ambiguous as to the name/version
    # split, so yield every possible split ("adns" + "python-1.1.0",
    # "adns-python" + "1.1.0", "adns-python-1.1.0" + no version).  Spurious
    # interpretations compare lower than any real numeric version, so they
    # are unlikely to match a request.
    parts = basename.split('-')
    if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
        # it is a bdist_dumb, not an sdist -- bail out
        return
    for split in range(1, len(parts) + 1):
        yield Distribution(
            location, metadata, '-'.join(parts[:split]),
            '-'.join(parts[split:]),
            py_version=py_version, precedence=precedence,
            platform=platform
        )
# Recipe from the Python itertools documentation.
def unique_everseen(iterable, key=None):
    """List unique elements, preserving order. Remember all elements ever seen.

    `key`, if given, maps each element to the value used for uniqueness.

    >>> list(unique_everseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D']
    >>> list(unique_everseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'D']
    """
    seen = set()
    seen_add = seen.add
    if key is None:
        # Use itertools directly (already imported at module level)
        # instead of going through the six.moves compatibility shim.
        for element in itertools.filterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element
def unique_values(func):
    """
    Wrap a function returning an iterable such that the resulting iterable
    only ever yields unique items.
    """
    @wraps(func)
    def deduped(*args, **kwargs):
        return unique_everseen(func(*args, **kwargs))
    return deduped
# Matches an HTML tag that carries a rel=... attribute; group 1 is the tag
# contents and group 2 the rel value.
REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
    # Links declared via rel attributes.
    for match in REL.finditer(page):
        tag, rel = match.groups()
        rels = {part.strip() for part in rel.lower().split(',')}
        if 'homepage' in rels or 'download' in rels:
            for href in HREF.finditer(tag):
                yield urllib.parse.urljoin(url, htmldecode(href.group(1)))
    # Links following legacy PyPI table headers.
    for marker in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(marker)
        if pos == -1:
            continue
        match = HREF.search(page, pos)
        if match:
            yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
class ContentChecker:
    """
    A null content checker that defines the interface for checking content
    """

    def feed(self, block):
        """
        Feed a block of data to the hash (no-op for the null checker).
        """

    def is_valid(self):
        """
        Check the hash. Return False if validation fails (never, here).
        """
        return True

    def report(self, reporter, template):
        """
        Call reporter with information about the checker (hash name)
        substituted into the template (no-op for the null checker).
        """
class HashChecker(ContentChecker):
    """Validates downloaded content against a hash declared in the URL
    fragment (e.g. ``#md5=...`` or ``#sha256=...``).
    """

    # Recognizes "<algorithm>=<hexdigest>" inside a URL fragment.
    pattern = re.compile(
        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
        r'(?P<expected>[a-f0-9]+)'
    )

    def __init__(self, hash_name, expected):
        self.hash_name = hash_name
        self.hash = hashlib.new(hash_name)
        self.expected = expected

    @classmethod
    def from_url(cls, url):
        "Construct a (possibly null) ContentChecker from a URL"
        fragment = urllib.parse.urlparse(url).fragment
        if fragment:
            match = cls.pattern.search(fragment)
            if match:
                return cls(**match.groupdict())
        # No usable hash declaration; fall back to the null checker.
        return ContentChecker()

    def feed(self, block):
        self.hash.update(block)

    def is_valid(self):
        return self.hash.hexdigest() == self.expected

    def report(self, reporter, template):
        return reporter(template % self.hash_name)
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(
self, index_url="https://pypi.org/simple/", hosts=('*',),
ca_bundle=None, verify_ssl=True, *args, **kw
):
Environment.__init__(self, *args, **kw)
self.index_url = index_url + "/" [:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
self.allows = re.compile('|'.join(map(translate, hosts))).match
self.to_scan = []
use_ssl = (
verify_ssl
and ssl_support.is_available
and (ca_bundle or ssl_support.find_ca_bundle())
)
if use_ssl:
self.opener = ssl_support.opener_for(ca_bundle)
else:
self.opener = urllib.request.urlopen
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
tmpl = "Download error on %s: %%s -- Some packages may not be found!"
f = self.open_url(url, tmpl % url)
if f is None:
return
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str):
# In Python 3 and got bytes but want str.
if isinstance(f, urllib.error.HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path, item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
list(map(self.add, dists))
def url_ok(self, url, fatal=False):
s = URL_SCHEME(url)
is_file = s and s.group(1).lower() == 'file'
if is_file or self.allows(urllib.parse.urlparse(url)[1]):
return True
msg = (
"\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/2hrImnY for details).\n")
if fatal:
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
def scan_egg_links(self, search_path):
dirs = filter(os.path.isdir, search_path)
egg_links = (
(path, entry)
for path in dirs
for entry in os.listdir(path)
if entry.endswith('.egg-link')
)
list(itertools.starmap(self.scan_egg_link, egg_links))
def scan_egg_link(self, path, entry):
with open(os.path.join(path, entry)) as raw_lines:
# filter non-empty lines
lines = list(filter(None, map(str.strip, raw_lines)))
if len(lines) != 2:
# format is not recognized; punt
return
egg_path, setup_path = lines
for dist in find_distributions(os.path.join(path, egg_path)):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
def process_index(self, url, page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = list(map(
urllib.parse.unquote, link[len(self.index_url):].split('/')
))
if len(parts) == 2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(), {})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url += '#egg=%s-%s' % (pkg, ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
)
else:
return "" # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg:
self.warn(msg, *args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name + '/')
if not self.package_pages.get(requirement.key):
# Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name + '/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.not_found_in_index(requirement)
for url in list(self.package_pages.get(requirement.key, ())):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan()
self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement, installer)
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(
self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?"
% (checker.hash.name, os.path.basename(filename))
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (
self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec, Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found, fragment, tmpdir)
return found
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
spec = parse_requirement_arg(spec)
return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
def fetch_distribution(
self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence == DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn(
"Skipping development or system egg: %s", dist,
)
skipped[dist] = 1
continue
test = (
dist in req
and (dist.precedence <= SOURCE_DIST or not source)
)
if test:
loc = self.download(dist.location, tmpdir)
dist.download_location = loc
if os.path.exists(dist.download_location):
return dist
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if not dist and local_index is not None:
dist = find(requirement, local_index)
if dist is None:
if self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or working download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
else:
self.info("Best match: %s", dist)
return dist.clone(location=dist.download_location)
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
if dist is not None:
return dist.location
return None
def gen_setup(self, filename, fragment, tmpdir):
match = EGG_FRAGMENT.match(fragment)
dists = match and [
d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists) == 1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename = dst
with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment, dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp = None
try:
checker = HashChecker.from_url(url)
fp = self.open_url(url)
if isinstance(fp, urllib.error.HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code, fp.msg)
)
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
# Some servers return multiple Content-Length headers :(
sizes = get_all_headers(headers, 'Content-Length')
size = max(map(int, sizes))
self.reporthook(url, filename, blocknum, bs, size)
with open(filename, 'wb') as tfp:
while True:
block = fp.read(bs)
if block:
checker.feed(block)
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
self.check_hash(checker, filename, tfp)
return headers
finally:
if fp:
fp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url, warning=None):
if url.startswith('file:'):
return local_open(url)
try:
return open_with_auth(url, self.opener)
except (ValueError, http_client.InvalidURL) as v:
msg = ' '.join([str(arg) for arg in v.args])
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg))
except urllib.error.HTTPError as v:
return v
except urllib.error.URLError as v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason))
except http_client.BadStatusLine as v:
if warning:
self.warn(warning, v.line)
else:
raise DistutilsError(
'%s returned a bad status line. The server might be '
'down, %s' %
(url, v.line)
)
except (http_client.HTTPException, socket.error) as v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v))
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
if name:
while '..' in name:
name = name.replace('..', '.').replace('\\', '_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir, name)
# Download the file
#
if scheme == 'svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme == 'git' or scheme.startswith('git+'):
return self._download_git(url, filename)
elif scheme.startswith('hg+'):
return self._download_hg(url, filename)
elif scheme == 'file':
return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
def scan_url(self, url):
self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type', '').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)
def _download_svn(self, url, filename):
warnings.warn("SVN download support is deprecated", UserWarning)
url = url.split('#', 1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/', 1)
auth, host = _splituser(netloc)
if auth:
if ':' in auth:
user, pw = auth.split(':', 1)
creds = " --username=%s --password=%s" % (user, pw)
else:
creds = " --username=" + auth
netloc = host
parts = scheme, netloc, url, p, q, f
url = urllib.parse.urlunparse(parts)
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
return filename
@staticmethod
def _vcs_split_rev_from_url(url, pop_prefix=False):
scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
scheme = scheme.split('+', 1)[-1]
# Some fragment identification fails
path = path.split('#', 1)[0]
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
# Also, discard fragment
url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def _download_git(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing git clone from %s to %s", url, filename)
os.system("git clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Checking out %s", rev)
os.system("git -C %s checkout --quiet %s" % (
filename,
rev,
))
return filename
def _download_hg(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing hg clone from %s to %s", url, filename)
os.system("hg clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Updating to %s", rev)
os.system("hg --cwd %s up -C -r %s -q" % (
filename,
rev,
))
return filename
    def debug(self, msg, *args):
        """Forward to the module-level logger at DEBUG level (lazy %-args)."""
        log.debug(msg, *args)

    def info(self, msg, *args):
        """Forward to the module-level logger at INFO level (lazy %-args)."""
        log.info(msg, *args)

    def warn(self, msg, *args):
        """Forward to the module-level logger at WARNING level (lazy %-args)."""
        # NOTE(review): if ``log`` were a stdlib logging.Logger, ``warn`` is a
        # deprecated alias of ``warning`` — but distutils-style log objects
        # only provide ``warn``; confirm the type before renaming.
        log.warn(msg, *args)
# Matches one character entity reference: a decimal numeric reference
# ("&#38;"), a hexadecimal numeric reference ("&#x26;"), or a named
# reference ("&amp;"); the trailing semicolon is optional.
_entity_re = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?')
entity_sub = _entity_re.sub
def decode_entity(match):
    """Decode one regex *match* of an HTML entity to its character(s)."""
    return unescape(match.group(0))
def htmldecode(text):
    """
    Decode HTML entities in the given text.

    >>> htmldecode(
    ...     'https://../package_name-0.1.2.tar.gz'
    ...     '?tokena=A&tokenb=B">package_name-0.1.2.tar.gz')
    'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz'
    """
    # Delegates to the module-level entity regex + html.unescape wrapper.
    return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
    """Decorator factory: run the wrapped callable with the global socket
    default timeout set to *timeout* seconds, restoring the previous value
    afterwards (even on error).

    Note the setting is process-global while the call runs, so concurrent
    threads are affected as well.
    """
    from functools import wraps  # local import: keeps module deps unchanged

    def _socket_timeout(func):
        @wraps(func)  # preserve the wrapped function's name/docstring
        def _socket_timeout(*args, **kwargs):
            old_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(old_timeout)
        return _socket_timeout
    return _socket_timeout
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = urllib.parse.unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
encoded_bytes = base64.b64encode(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n', '')
class Credential:
    """A username/password pair that unpacks like a 2-tuple."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __iter__(self):
        # Support ``user, pw = cred`` and ``tuple(cred)``.
        return iter((self.username, self.password))

    def __str__(self):
        return '%(username)s:%(password)s' % vars(self)
class PyPIConfig(configparser.RawConfigParser):
    """Reader for the user's ~/.pypirc, exposing per-repository credentials."""

    def __init__(self):
        """
        Load from ~/.pypirc
        """
        defaults = dict.fromkeys(['username', 'password', 'repository'], '')
        configparser.RawConfigParser.__init__(self, defaults)
        rc = os.path.join(os.path.expanduser('~'), '.pypirc')
        if os.path.exists(rc):
            self.read(rc)

    @property
    def creds_by_repository(self):
        # Map repository URL -> Credential for every section naming a repo.
        sections_with_repositories = [
            section for section in self.sections()
            if self.get(section, 'repository').strip()
        ]
        return dict(map(self._get_repo_cred, sections_with_repositories))

    def _get_repo_cred(self, section):
        # Build the (repo_url, Credential) pair for one config section.
        repo = self.get(section, 'repository').strip()
        return repo, Credential(
            self.get(section, 'username').strip(),
            self.get(section, 'password').strip(),
        )

    def find_credential(self, url):
        """
        If the URL indicated appears to be a repository defined in this
        config, return the credential for that repository.
        """
        for repository, cred in self.creds_by_repository.items():
            if url.startswith(repository):
                return cred
def open_with_auth(url, opener=urllib.request.urlopen):
    """Open a urllib2 request, handling HTTP authentication"""
    parsed = urllib.parse.urlparse(url)
    scheme, netloc, path, params, query, frag = parsed
    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise http_client.InvalidURL("nonnumeric port: ''")
    if scheme in ('http', 'https'):
        auth, address = _splituser(netloc)
    else:
        auth = None
    if not auth:
        # No user:pw@ in the URL itself: fall back to ~/.pypirc credentials.
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)', *info)
    if auth:
        auth = "Basic " + _encode_auth(auth)
        # Request the URL with the user:pw@ part stripped; credentials go
        # into the Authorization header instead.
        parts = scheme, address, path, params, query, frag
        new_url = urllib.parse.urlunparse(parts)
        request = urllib.request.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib.request.Request(url)
    request.add_header('User-Agent', user_agent)
    fp = opener(request)
    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
        if s2 == scheme and h2 == address:
            parts = s2, netloc, path2, param2, query2, frag2
            fp.url = urllib.parse.urlunparse(parts)
    return fp
# copy of urllib.parse._splituser from Python 3.8
def _splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
user, delim, host = host.rpartition('@')
return (user if delim else None), host
# adding a timeout to avoid freezing package_index
# NOTE: this rebinding wraps ``open_with_auth`` so every call enforces a
# socket-level default timeout of ``_SOCKET_TIMEOUT`` seconds.
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
    """No-op; retained only so older callers keep working."""
    return url  # backward compatibility
def local_open(url):
    """Read a local path, with special support for directories.

    Files are opened directly; a directory URL ending in ``/`` is served as
    an HTML page (its ``index.html`` verbatim if present, otherwise a
    synthesized link listing).  Anything else yields a 404.  The result is
    always returned as an ``HTTPError`` carrying the body stream.
    """
    scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
    filename = urllib.request.url2pathname(path)
    if os.path.isfile(filename):
        return urllib.request.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        for f in os.listdir(filename):
            filepath = os.path.join(filename, f)
            if f == 'index.html':
                # An explicit index page wins; serve it verbatim.
                with open(filepath, 'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(filepath):
                f += '/'
            files.append('<a href="{name}">{name}</a>'.format(name=f))
        else:
            # No index.html found: synthesize a simple listing page.
            tmpl = (
                "<html><head><title>{url}</title>"
                "</head><body>{files}</body></html>")
            body = tmpl.format(url=url, files='\n'.join(files))
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    # six.StringIO == io.StringIO on Python 3; kept to match the module's
    # existing six usage.
    body_stream = six.StringIO(body)
    # Fixed: the original line carried a stray "| PypiClean" text artifact
    # that made this a broken binary-or expression at runtime.
    return urllib.error.HTTPError(url, status, message, headers, body_stream)
/AutoTorch-0.0.2b20200818.tar.gz/AutoTorch-0.0.2b20200818/autotorch/scheduler/remote/remote.py | import os
import time
import signal
import atexit
import weakref
import logging
import subprocess
import concurrent
from threading import Thread
import multiprocessing as mp
from distributed import Client
from .ssh_helper import start_scheduler, start_worker
__all__ = ['Remote']
logger = logging.getLogger(__name__)
_global_remote_services = weakref.WeakValueDictionary()
_global_service_index = [0]
def _get_global_remote_service():
L = sorted(list(_global_remote_services), reverse=True)
for k in L:
c = _global_remote_services[k]
if c.status != "closed":
return c
else:
del _global_remote_services[k]
del L
return None
def _set_global_remote_service(c):
if c is not None:
_global_remote_services[_global_service_index[0]] = c
_global_service_index[0] += 1
def _close_global_remote_services():
    """
    Force close of global client. This cleans up when a client
    wasn't close explicitly, e.g. interactive sessions.
    """
    service = _get_global_remote_service()
    if service is None:
        return
    service.shutdown()
class Service(object):
    """Handle to a spawned helper process, tracked by a liveness status."""

    def __init__(self, proc):
        self.proc = proc
        self.status = 'live'

    def shutdown(self):
        """Terminate the service's entire process group."""
        os.killpg(os.getpgid(self.proc.pid), signal.SIGTERM)
        # Mark closed so the module's weak registry can prune this entry;
        # previously the status never left 'live'.
        self.status = 'closed'
def start_service(remote_ip, port):
    """Launch the ``agremote`` helper at *remote_ip*:*port* and wrap it."""
    command = ['agremote', '--address', remote_ip, '--port', str(port)]
    return Service(subprocess.Popen(command))
class Remote(Client):
    """Dask ``Client`` wrapper that can spawn its own remote service.

    With ``local=True`` an in-process cluster is used; otherwise an
    ``agremote`` service is started at ``remote_ip:port`` and connected to.
    Each instance receives a unique, process-wide ``remote_id``.
    The ssh_* / remote_python parameters are accepted for interface
    compatibility but unused here — TODO confirm against callers.
    """
    LOCK = mp.Lock()
    REMOTE_ID = mp.Value('i', 0)

    def __init__(self, remote_ip=None, port=None, local=False, ssh_username=None,
                 ssh_port=22, ssh_private_key=None, remote_python=None):
        self.service = None
        if local:
            super().__init__(processes=False)
        else:
            remote_addr = (remote_ip + ':{}'.format(port))
            self.service = start_service(remote_ip, port)
            _set_global_remote_service(self.service)
            # Give the freshly spawned service time to come up before
            # connecting.  NOTE(review): a readiness poll would be more
            # robust than a fixed 10s sleep.  (Dropped the redundant
            # function-local ``import time``; the module imports it.)
            time.sleep(10)
            super().__init__(remote_addr)
        with Remote.LOCK:
            self.remote_id = Remote.REMOTE_ID.value
            Remote.REMOTE_ID.value += 1

    def close(self, timeout=2):
        """Shut down the spawned service (if any), then close the client."""
        if self.service:
            self.service.shutdown()
        super().close(timeout)

    def upload_files(self, files, **kwargs):
        """Upload each file in *files* to the workers via ``upload_file``."""
        for filename in files:
            self.upload_file(filename, **kwargs)

    def __repr__(self):
        reprstr = self.__class__.__name__ + ' REMOTE_ID: {}, \n\t'.format(self.remote_id) + \
            super().__repr__()
        return reprstr
class DaskRemoteService(object):
    """Starts a dask scheduler plus one worker over SSH and monitors them.

    The handles returned by ``start_scheduler`` / ``start_worker`` are
    mappings holding at least "input_queue", "output_queue" and "thread"
    (exact contract lives in ``ssh_helper`` — TODO confirm other keys).
    """

    def __init__(self, remote_addr, scheduler_port, ssh_username=None,
                 ssh_port=22, ssh_private_key=None, remote_python=None):
        self.scheduler_addr = remote_addr
        self.scheduler_port = scheduler_port
        self.ssh_username = ssh_username
        self.ssh_port = ssh_port
        self.ssh_private_key = ssh_private_key
        self.remote_python = remote_python
        # Dummy, never-started thread so start_monitoring's is_alive()
        # check is valid on the very first call.
        self.monitor_thread = Thread()
        # Start the scheduler node
        self.scheduler = start_scheduler(
            remote_addr,
            scheduler_port,
            ssh_username,
            ssh_port,
            ssh_private_key,
            remote_python,
        )
        # Start worker nodes
        self.worker = start_worker(
            self.scheduler_addr,
            self.scheduler_port,
            remote_addr,
            self.ssh_username,
            self.ssh_port,
            self.ssh_private_key,
            self.remote_python,
        )
        self.start_monitoring()
        self.status = "live"

    def start_monitoring(self):
        """Launch the output-forwarding thread; no-op if already running."""
        if self.monitor_thread.is_alive():
            return
        self.monitor_thread = Thread(target=self.monitor_remote_processes)
        #self.monitor_thread.daemon = True
        self.monitor_thread.start()

    def monitor_remote_processes(self):
        """Forward remote-process output to stdout until KeyboardInterrupt.

        "distributed." log prefixes are rebranded to "autotorch".
        NOTE(review): the ``break`` in the except clause only exits the
        inner queue-drain loop, not the outer ``while True`` — confirm
        that is the intended behavior.
        """
        all_processes = [self.scheduler, self.worker]
        try:
            while True:
                for process in all_processes:
                    while not process["output_queue"].empty():
                        try:
                            msg = process["output_queue"].get()
                            if 'distributed.' in msg:
                                msg = msg.replace('distributed', 'autotorch')
                            print(msg)
                        except Exception as e:
                            print(f'Exception happend {e}, terminating the remote.')
                            break
                # Kill some time and free up CPU
                time.sleep(0.1)
        except KeyboardInterrupt:
            pass

    def shutdown(self):
        """Ask worker then scheduler to stop and join their I/O threads."""
        all_processes = [self.worker, self.scheduler]
        for process in all_processes:
            process["input_queue"].put("shutdown")
            process["thread"].join()
        self.status = "closed"
# Tear down any services that were never closed explicitly (e.g. from
# interactive sessions) when the interpreter exits.  Fixed: the original
# line carried a stray "| PypiClean" text artifact.
atexit.register(_close_global_remote_services)
/GPyM-0.60b.tar.gz/GPyM-0.60b/get_dtime_gpm.py |
import os,sys
from optparse import OptionParser
from numpy import array
#from datetime import datetime, timedelta
def get_dtime_gpm(srcPath, fn_read):
if 'GMI' in srcPath : h5Grp = 'S1'
elif 'DPR' in srcPath : h5Grp = 'NS'
elif 'KuPR' in srcPath : h5Grp = 'NS'
elif 'KaPR' in srcPath : h5Grp = 'MS'
else:
raise ValueError, 'unknown hdf5 group [%s] for %s'%(h5Grp, srcPath)
Year = fn_read( srcPath,'%s/ScanTime/Year'%h5Grp ).astype('int')
Month = fn_read( srcPath,'%s/ScanTime/Month'%h5Grp ).astype('int')
Day = fn_read( srcPath,'%s/ScanTime/DayOfMonth'%h5Grp ).astype('int')
Hour = fn_read( srcPath,'%s/ScanTime/Hour'%h5Grp ).astype('int')
Minute = fn_read( srcPath,'%s/ScanTime/Minute'%h5Grp ).astype('int')
Second = fn_read( srcPath,'%s/ScanTime/Second'%h5Grp ).astype('int')
MicSec = fn_read( srcPath,'%s/ScanTime/MilliSecond'%h5Grp ).astype('int')*1000
return array( [Year, Month, Day, Hour, Minute, Second, MicSec] ).T
# NOTE(review): dead code below — an earlier implementation that converted
# the scan-time columns into datetime objects.  It is a bare module-level
# string literal with no runtime effect; consider deleting it outright.
'''
DTime = []
for y,m,d,H,M,S,uS in map(None,Year,Month,Day,Hour,Minute,Second,MicSec):
    if uS == 1000000:
        DTime.append( datetime(y,m,d,H,M,S,0)+timedelta(seconds=1) )
        print 'Warning [NS/ScanTime/Millisecond] == 1000 : %i %i %i %i %i %i %i'%(y,m,d,H,M,S,uS/1000)
    else:
        DTime.append( datetime(y,m,d,H,M,S,uS) )
return array( DTime )
'''
def main(args,opts):
    # Placeholder entry point: echoes the parsed positional args and
    # options.  (Python 2 print-statement syntax is intentional.)
    print args
    print opts
    return
if __name__=='__main__':
    # Script entry: parse options and hand off to main().
    usage = 'usage: %prog [options] arg'
    version = '%prog 1.0'
    parser = OptionParser(usage=usage,version=version)
    # parser.add_option('-r','--rescan',action='store_true',dest='rescan',
    #                   help='rescan all directory to find missing file')
    (options,args) = parser.parse_args()
    # if len(args) == 0:
    #     parser.print_help()
    # else:
    #     main(args,options)
    # LOG = LOGGER()
    # Fixed: the original call line carried a stray "| PypiClean" artifact.
    main(args,options)
/Flask-SQLAlchemy-Booster-0.6.31.tar.gz/Flask-SQLAlchemy-Booster-0.6.31/flask_sqlalchemy_booster/entities_router/__init__.py | from flask import Response
import json
from schemalite.core import json_encoder
from .crud_constructors import (
construct_get_view_function,
construct_index_view_function, construct_post_view_function,
construct_put_view_function, construct_delete_view_function,
construct_patch_view_function, construct_batch_save_view_function)
from . import entity_definition_keys as edk
from toolspy import (
all_subclasses, fetch_nested_key_from_dict, fetch_nested_key,
delete_dict_keys, union, merge, difference, transform_dict)
from copy import deepcopy
class EntityOperation(object):
    """
    Base class for a single CRUD operation bound to an :class:`Entity`.
    """

    def __init__(self, entity=None):
        self.init_entity(entity)

    def init_entity(self, entity=None):
        """Bind (or re-bind) this operation to *entity*."""
        self.entity = entity

    def to_dict(self):
        """Serialize the operation's settings; subclasses must override."""
        raise NotImplementedError
class Get(EntityOperation):
    """GET /<entity.url_slug>/<id> — fetch a single instance.

    Parameters (all optional)
    -------------------------
    entity : Entity
        Owning entity; may be bound later via ``init_entity``.
    query_modifier : callable(query) -> query
        Applied before the id filter, e.g.
        ``lambda q: q.filter(Order.confirmed == True)``.
        Takes precedence over the entity-level query_modifier.
    permitted_object_getter : callable
        If set, used instead of the default id-based lookup
        (e.g. ``lambda: current_user``).  Takes precedence over the
        entity-level getter.
    id_attr : str
        Attribute matched against the url id segment instead of the
        primary key (e.g. ``'email'`` for /users/abcd@xyz.com).
    response_dict_struct : dict
        Structure (attrs/rels) of the serialized object.
    response_dict_modifiers : list of callable(dict, instance) -> dict
        Applied in order to the response dict before returning it.
    exception_handler, access_checker : callable
        Per-operation overrides of the entity-level hooks.
    url : str
        Override for the default /<url_slug>/<id> route
        (e.g. a special /accounts/current endpoint).
    enable_caching, cache_key_determiner, cache_timeout
        Response caching knobs.
    view_function : callable
        Full replacement for the generated view.
    """
    method = 'get'

    def __init__(
            self, entity=None, view_function=None, query_modifier=None,
            permitted_object_getter=None, id_attr=None, response_dict_struct=None,
            response_dict_modifiers=None, exception_handler=None, access_checker=None,
            url=None, enable_caching=False, cache_key_determiner=None,
            cache_timeout=None):
        super(Get, self).__init__(entity=entity)
        self.url = url
        self.enable_caching = enable_caching
        self.cache_key_determiner = cache_key_determiner
        self.cache_timeout = cache_timeout
        self.view_function = view_function
        self.query_modifier = query_modifier
        self.permitted_object_getter = permitted_object_getter
        self.id_attr = id_attr
        self.response_dict_struct = response_dict_struct
        self.response_dict_modifiers = response_dict_modifiers
        self.exception_handler = exception_handler
        self.access_checker = access_checker

    def to_dict(self):
        """Serialize configured options under the router's edk keys."""
        return transform_dict({
            edk.URL: self.url,
            edk.ENABLE_CACHING: self.enable_caching,
            edk.CACHE_KEY_DETERMINER: self.cache_key_determiner,
            edk.CACHE_TIMEOUT: self.cache_timeout,
            edk.VIEW_FUNC: self.view_function,
            edk.QUERY_MODIFIER: self.query_modifier,
            # BUG FIX: this value was exported under edk.PERMITTED_OPERATIONS,
            # the key Entity uses for its permitted_operations list; the
            # sibling Put/Delete classes use PERMITTED_OBJECT_GETTER.
            edk.PERMITTED_OBJECT_GETTER: self.permitted_object_getter,
            # id_attr was accepted by __init__ but never serialized before.
            edk.ID_ATTR: self.id_attr,
            edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,
            edk.RESPONSE_DICT_MODIFIERS: self.response_dict_modifiers,
            edk.EXCEPTION_HANDLER: self.exception_handler,
            edk.ACCESS_CHECKER: self.access_checker,
        }, skip_none_vals=True)
class Index(EntityOperation):
    """GET /<entity.url_slug> — list/search the collection.

    Supports paging and sorting defaults (default_limit, default_sort,
    default_orderby, default_offset, default_page, default_per_page),
    optional response caching, a query_modifier, a response_dict_struct,
    and a custom_response_creator; all serialized via :meth:`to_dict`.
    """
    method = 'index'

    def __init__(
            self, entity=None, url=None, view_function=None, enable_caching=None,
            cache_key_determiner=None, cache_timeout=None, query_modifier=None,
            response_dict_struct=None, custom_response_creator=None,
            exception_handler=None, access_checker=None,
            default_limit=None, default_sort=None, default_orderby=None,
            default_offset=None, default_page=None, default_per_page=None):
        super(Index, self).__init__(entity=entity)
        self.url = url
        self.view_function = view_function
        self.enable_caching = enable_caching
        self.cache_key_determiner = cache_key_determiner
        self.cache_timeout = cache_timeout
        self.query_modifier = query_modifier
        self.response_dict_struct = response_dict_struct
        self.custom_response_creator = custom_response_creator
        self.exception_handler = exception_handler
        self.access_checker = access_checker
        self.default_limit = default_limit
        self.default_sort = default_sort
        self.default_orderby = default_orderby
        self.default_offset = default_offset
        self.default_page = default_page
        self.default_per_page = default_per_page

    def to_dict(self):
        """Serialize configured options under the router's edk keys."""
        return transform_dict({
            edk.URL: self.url,
            edk.VIEW_FUNC: self.view_function,
            edk.ENABLE_CACHING: self.enable_caching,
            edk.CACHE_KEY_DETERMINER: self.cache_key_determiner,
            edk.CACHE_TIMEOUT: self.cache_timeout,
            edk.QUERY_MODIFIER: self.query_modifier,
            edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,
            edk.CUSTOM_RESPONSE_CREATOR: self.custom_response_creator,
            edk.EXCEPTION_HANDLER: self.exception_handler,
            edk.ACCESS_CHECKER: self.access_checker,
            edk.DEFAULT_LIMIT: self.default_limit,
            edk.DEFAULT_SORT: self.default_sort,
            edk.DEFAULT_ORDERBY: self.default_orderby,
            edk.DEFAULT_OFFSET: self.default_offset,
            edk.DEFAULT_PAGE: self.default_page,
            edk.DEFAULT_PER_PAGE: self.default_per_page
        }, skip_none_vals=True)
class Post(EntityOperation):
    """POST /<entity.url_slug> — create a new instance.

    Accepts save hooks (before_save / after_save), field whitelist and
    blacklist (settable_fields / non_settable_fields), flags controlling
    which keys are stripped before schema validation, and an
    input_schema_modifier callable that can tweak the generated schema.
    """
    method = 'post'

    def __init__(
            self, entity=None, url=None, view_function=None, before_save=None, after_save=None,
            response_dict_struct=None, exception_handler=None, access_checker=None,
            settable_fields=None, non_settable_fields=None,
            remove_property_keys_before_validation=None, remove_relationship_keys_before_validation=None,
            remove_assoc_proxy_keys_before_validation=None, input_schema_modifier=None):
        super(Post, self).__init__(entity=entity)
        self.url = url
        self.view_function = view_function
        self.before_save = before_save
        self.after_save = after_save
        self.response_dict_struct = response_dict_struct
        self.exception_handler = exception_handler
        self.access_checker = access_checker
        self.settable_fields = settable_fields
        self.non_settable_fields = non_settable_fields
        self.remove_property_keys_before_validation = remove_property_keys_before_validation
        self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation
        self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation
        self.input_schema_modifier = input_schema_modifier

    def to_dict(self):
        """Serialize configured options under the router's edk keys."""
        return transform_dict({
            edk.URL: self.url,
            edk.VIEW_FUNC: self.view_function,
            edk.BEFORE_SAVE_HANDLERS: self.before_save,
            edk.AFTER_SAVE_HANDLERS: self.after_save,
            edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,
            edk.EXCEPTION_HANDLER: self.exception_handler,
            edk.ACCESS_CHECKER: self.access_checker,
            edk.SETTABLE_FIELDS: self.settable_fields,
            edk.NON_SETTABLE_FIELDS: self.non_settable_fields,
            edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation,
            edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,
            edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: self.remove_assoc_proxy_keys_before_validation,
            edk.INPUT_SCHEMA_MODIFIER: self.input_schema_modifier
        }, skip_none_vals=True)
class Put(EntityOperation):
    """PUT /<entity.url_slug>/<id> — update an existing instance.

    Like :class:`Post`, but the target object is located first: via the
    url id (optionally narrowed by ``query_modifier``) or directly via
    ``permitted_object_getter``.  Save hooks, field whitelists and schema
    tweaks mirror the Post options.
    """
    method = 'put'

    def __init__(
            self, entity=None, url=None, view_function=None,
            query_modifier=None,
            permitted_object_getter=None,
            before_save=None, after_save=None,
            response_dict_struct=None, exception_handler=None, access_checker=None,
            settable_fields=None, non_settable_fields=None,
            remove_property_keys_before_validation=None, remove_relationship_keys_before_validation=None,
            remove_assoc_proxy_keys_before_validation=None, input_schema_modifier=None):
        super(Put, self).__init__(entity=entity)
        self.url = url
        self.view_function = view_function
        self.query_modifier = query_modifier
        self.permitted_object_getter = permitted_object_getter
        self.before_save = before_save
        self.after_save = after_save
        self.response_dict_struct = response_dict_struct
        self.exception_handler = exception_handler
        self.access_checker = access_checker
        self.settable_fields = settable_fields
        self.non_settable_fields = non_settable_fields
        self.remove_property_keys_before_validation = remove_property_keys_before_validation
        self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation
        self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation
        self.input_schema_modifier = input_schema_modifier

    def to_dict(self):
        """Serialize configured options under the router's edk keys."""
        return transform_dict({
            edk.URL: self.url,
            edk.VIEW_FUNC: self.view_function,
            edk.QUERY_MODIFIER: self.query_modifier,
            edk.PERMITTED_OBJECT_GETTER: self.permitted_object_getter,
            edk.BEFORE_SAVE_HANDLERS: self.before_save,
            edk.AFTER_SAVE_HANDLERS: self.after_save,
            edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,
            edk.EXCEPTION_HANDLER: self.exception_handler,
            edk.ACCESS_CHECKER: self.access_checker,
            edk.SETTABLE_FIELDS: self.settable_fields,
            edk.NON_SETTABLE_FIELDS: self.non_settable_fields,
            edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation,
            edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,
            edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: self.remove_assoc_proxy_keys_before_validation,
            edk.INPUT_SCHEMA_MODIFIER: self.input_schema_modifier
        }, skip_none_vals=True)
class Patch(EntityOperation):
    """PATCH /<entity.url_slug>/<id> — partially update an instance.

    Mirrors :class:`Put` plus a ``commands`` option.
    NOTE(review): ``commands`` is stored but not serialized by to_dict —
    confirm whether the router reads it directly.
    """
    method = 'patch'

    def __init__(
            self, entity=None, url=None, view_function=None, query_modifier=None,
            commands=None, permitted_object_getter=None,
            before_save=None, after_save=None,
            response_dict_struct=None, exception_handler=None, access_checker=None,
            settable_fields=None, non_settable_fields=None,
            remove_property_keys_before_validation=None, remove_relationship_keys_before_validation=None,
            remove_assoc_proxy_keys_before_validation=None, input_schema_modifier=None):
        super(Patch, self).__init__(entity=entity)
        self.url = url
        self.view_function = view_function
        self.query_modifier = query_modifier
        self.permitted_object_getter = permitted_object_getter
        self.commands = commands
        self.before_save = before_save
        self.after_save = after_save
        self.response_dict_struct = response_dict_struct
        self.exception_handler = exception_handler
        self.access_checker = access_checker
        self.settable_fields = settable_fields
        self.non_settable_fields = non_settable_fields
        self.remove_property_keys_before_validation = remove_property_keys_before_validation
        self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation
        self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation
        self.input_schema_modifier = input_schema_modifier

    def to_dict(self):
        """Serialize configured options under the router's edk keys."""
        return transform_dict({
            edk.URL: self.url,
            edk.VIEW_FUNC: self.view_function,
            edk.QUERY_MODIFIER: self.query_modifier,
            # Consistency fix: Put/Delete serialize their
            # permitted_object_getter; Patch stored it but never exported it.
            edk.PERMITTED_OBJECT_GETTER: self.permitted_object_getter,
            edk.BEFORE_SAVE_HANDLERS: self.before_save,
            edk.AFTER_SAVE_HANDLERS: self.after_save,
            edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,
            edk.EXCEPTION_HANDLER: self.exception_handler,
            edk.ACCESS_CHECKER: self.access_checker,
            edk.SETTABLE_FIELDS: self.settable_fields,
            edk.NON_SETTABLE_FIELDS: self.non_settable_fields,
            edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation,
            edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,
            edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: self.remove_assoc_proxy_keys_before_validation,
            edk.INPUT_SCHEMA_MODIFIER: self.input_schema_modifier
        }, skip_none_vals=True)
class Delete(EntityOperation):
    """DELETE /<entity.url_slug>/<id> — delete an instance.

    The target is located via the url id (optionally narrowed by
    ``query_modifier``) or via ``permitted_object_getter``.
    NOTE(review): the field/validation parameters mirror Put's — presumably
    kept for interface symmetry; confirm they are used for deletes.
    """
    method = 'delete'

    def __init__(
            self, entity=None, url=None, view_function=None, query_modifier=None,
            permitted_object_getter=None,
            before_save=None, after_save=None,
            response_dict_struct=None, exception_handler=None, access_checker=None,
            settable_fields=None, non_settable_fields=None,
            remove_property_keys_before_validation=None, remove_relationship_keys_before_validation=None,
            remove_assoc_proxy_keys_before_validation=None, input_schema_modifier=None):
        super(Delete, self).__init__(entity=entity)
        self.url = url
        self.view_function = view_function
        self.query_modifier = query_modifier
        self.permitted_object_getter = permitted_object_getter
        self.before_save = before_save
        self.after_save = after_save
        self.response_dict_struct = response_dict_struct
        self.exception_handler = exception_handler
        self.access_checker = access_checker
        self.settable_fields = settable_fields
        self.non_settable_fields = non_settable_fields
        self.remove_property_keys_before_validation = remove_property_keys_before_validation
        self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation
        self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation
        self.input_schema_modifier = input_schema_modifier

    def to_dict(self):
        """Serialize configured options under the router's edk keys."""
        return transform_dict({
            edk.URL: self.url,
            edk.VIEW_FUNC: self.view_function,
            edk.QUERY_MODIFIER: self.query_modifier,
            edk.PERMITTED_OBJECT_GETTER: self.permitted_object_getter,
            edk.BEFORE_SAVE_HANDLERS: self.before_save,
            edk.AFTER_SAVE_HANDLERS: self.after_save,
            edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,
            edk.EXCEPTION_HANDLER: self.exception_handler,
            edk.ACCESS_CHECKER: self.access_checker,
            edk.SETTABLE_FIELDS: self.settable_fields,
            edk.NON_SETTABLE_FIELDS: self.non_settable_fields,
            edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation,
            edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,
            edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: self.remove_assoc_proxy_keys_before_validation,
            edk.INPUT_SCHEMA_MODIFIER: self.input_schema_modifier
        }, skip_none_vals=True)
class BatchSave(EntityOperation):
    """Batch create/update endpoint: save many instances in one request.

    Rows are matched to existing objects via ``unique_identifier_fields``;
    ``update_only`` / ``create_only`` restrict the allowed outcome.  The
    operation can run as an async celery task (``run_as_async_task`` +
    ``celery_worker``), persisting progress on ``result_saving_instance_model``
    instances obtained via ``result_saving_instance_getter``.

    NOTE(review): unlike its siblings this class defines no ``to_dict``;
    the inherited one raises NotImplementedError — confirm the router
    consumes these attributes directly.
    """
    method = 'batch_save'

    def __init__(
            self, entity=None, url=None, view_function=None, query_modifier=None,
            permitted_object_getter=None, unique_identifier_fields=None,
            before_save=None, after_save=None,
            extra_actions_before_save=None, extra_actions_after_save=None,
            result_saving_instance_model=None,
            result_saving_instance_getter=None,
            run_as_async_task=False, celery_worker=None,
            response_dict_struct=None, exception_handler=None, access_checker=None,
            settable_fields=None, non_settable_fields=None,
            remove_property_keys_before_validation=False, remove_relationship_keys_before_validation=False,
            remove_assoc_proxy_keys_before_validation=False, input_schema_modifier=None,
            update_only=False, create_only=False,
            skip_pre_processors=False, skip_post_processors=False):
        super(BatchSave, self).__init__(entity=entity)
        self.url = url
        self.view_function = view_function
        self.query_modifier = query_modifier
        self.permitted_object_getter = permitted_object_getter
        self.unique_identifier_fields = unique_identifier_fields
        self.result_saving_instance_model = result_saving_instance_model
        self.result_saving_instance_getter = result_saving_instance_getter
        self.run_as_async_task = run_as_async_task
        self.celery_worker = celery_worker
        self.update_only = update_only
        self.create_only = create_only
        self.skip_pre_processors = skip_pre_processors
        self.skip_post_processors = skip_post_processors
        self.before_save = before_save
        self.after_save = after_save
        self.extra_actions_before_save = extra_actions_before_save
        self.extra_actions_after_save = extra_actions_after_save
        self.response_dict_struct = response_dict_struct
        self.exception_handler = exception_handler
        self.access_checker = access_checker
        self.settable_fields = settable_fields
        self.non_settable_fields = non_settable_fields
        self.remove_property_keys_before_validation = remove_property_keys_before_validation
        self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation
        self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation
        self.input_schema_modifier = input_schema_modifier
class Entity(object):
    """A routable resource wrapping a model class.

    The same model can be exposed as different entities in different
    parts of the application.

    Parameters (selected)
    ---------------------
    url_slug : str
        Common url prefix for the entity's CRUD endpoints (e.g. 'orders').
    model_class : class
        The FlaskSQLAlchemyBooster model backing this entity.
    name : str
        Display name; defaults to the model class name.
    router : EntitiesRouter
        Router to register with immediately; may also be linked later.
    get / index / post / put / patch / delete / batch_save
        Per-operation configuration objects; each is bound back to this
        entity if not already bound.
    Remaining keyword arguments are entity-wide defaults that individual
    operations may override (query_modifier, access_checker,
    exception_handler, response_dict_struct, caching knobs, etc.).
    """

    def __init__(
            self, url_slug=None, model_class=None, name=None, router=None,
            permitted_operations=None, permitted_object_getter=None,
            forbidden_operations=None, endpoint_slug=None, input_schema_modifier=None,
            query_modifier=None, access_checker=None, exception_handler=None, response_dict_modifiers=None,
            id_attr=None, response_dict_struct=None, non_settable_fields=None, settable_fields=None,
            remove_relationship_keys_before_validation=None, remove_assoc_proxy_keys_before_validation=None,
            remove_property_keys_before_validation=None, enable_caching=False, cache_timeout=None,
            get=None, index=None, put=None, post=None, patch=None, delete=None, batch_save=None):
        self.model_class = model_class
        self.name = name or self.model_class.__name__
        # BUG FIX: url_slug must be assigned BEFORE the router registration
        # below, which reads self.url_slug; previously it was set afterwards,
        # so passing router= raised AttributeError.
        self.url_slug = url_slug
        self.router = router
        if self.router:
            # NOTE(review): ``self not in ...`` tests the routes dict *keys*
            # (url slugs) against this Entity object, so it is always True —
            # probably meant ``self.url_slug not in self.router.routes``;
            # confirm before changing.
            if self not in self.router.routes:
                self.router.routes[self.url_slug] = self
        self.permitted_object_getter = permitted_object_getter
        self.permitted_operations = permitted_operations
        self.forbidden_operations = forbidden_operations
        self.endpoint_slug = endpoint_slug
        self.input_schema_modifier = input_schema_modifier
        self.query_modifier = query_modifier
        self.access_checker = access_checker
        self.exception_handler = exception_handler
        self.response_dict_modifiers = response_dict_modifiers
        self.response_dict_struct = response_dict_struct
        self.id_attr = id_attr
        self.non_settable_fields = non_settable_fields if non_settable_fields else []
        self.settable_fields = settable_fields if settable_fields else []
        self.enable_caching = enable_caching
        self.cache_timeout = cache_timeout
        self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation
        self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation
        self.remove_property_keys_before_validation = remove_property_keys_before_validation
        # Bind each supplied operation back to this entity unless the caller
        # already attached it to one.
        self.get = get
        if self.get and self.get.entity is None:
            self.get.init_entity(self)
        self.index = index
        if self.index and self.index.entity is None:
            self.index.init_entity(self)
        self.post = post
        if self.post and self.post.entity is None:
            self.post.init_entity(self)
        self.put = put
        if self.put and self.put.entity is None:
            self.put.init_entity(self)
        self.delete = delete
        if self.delete and self.delete.entity is None:
            self.delete.init_entity(self)
        self.patch = patch
        if self.patch and self.patch.entity is None:
            self.patch.init_entity(self)
        self.batch_save = batch_save
        if self.batch_save and self.batch_save.entity is None:
            self.batch_save.init_entity(self)

    def to_dict(self):
        """Serialize entity-wide defaults under the router's edk keys."""
        return transform_dict({
            edk.URL_SLUG: self.url_slug,
            edk.PERMITTED_OPERATIONS: self.permitted_operations,
            edk.FORBIDDEN_OPERATIONS: self.forbidden_operations,
            edk.ENDPOINT_SLUG: self.endpoint_slug,
            edk.QUERY_MODIFIER: self.query_modifier,
            edk.ACCESS_CHECKER: self.access_checker,
            edk.EXCEPTION_HANDLER: self.exception_handler,
            edk.RESPONSE_DICT_MODIFIERS: self.response_dict_modifiers,
            edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,
            edk.ID_ATTR: self.id_attr,
            edk.NON_SETTABLE_FIELDS: self.non_settable_fields,
            edk.SETTABLE_FIELDS: self.settable_fields,
            edk.ENABLE_CACHING: self.enable_caching,
            edk.CACHE_TIMEOUT: self.cache_timeout,
            edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,
            edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: self.remove_assoc_proxy_keys_before_validation,
            edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation
        }, skip_none_vals=True)
class EntitiesRouter(object):
    """
    Contains a collection of entities mapped to url routes. The router
    can be mounted on an app or a blueprint.

    Parameters
    -----------
    mount_point: Flask app or blueprint
        The app or blueprint on which the router is to be mounted. If this
        parameter is specified, the router will be mounted immediately. Or
        you can leave this unspecified and later call `router.mount_on(app_or_bp)`
    routes: dict
        A dictionary of url slugs mapped to entities like this
        {
            "orders": Entity(model_class=Order, index=Index(), get=Get(), post=Post(), put=Put()),
            "users": Entity(model_class=User, index=Index())
        }
    cache_handler: `class:FlaskCaching`
        A cache instance. Currently supports only FlaskCaching
    exception_handler: function, optional
        A function which accepts an exception and returns a json response
        Example:
        >>> def log_exception_and_return_json(e):
        >>>     return error_json(400, e.message)
    celery_worker: Celery, optional
        A celery worker which will be used to run the async batch save operation.
    register_schema_definition: bool, optional
        A bool flag which specifies whether the schema definition json needs to be registered.
    schema_def_url: str, Optional
        The url slug to be used to register the schema definition
    register_views_map: bool, optional
        A bool flag which specifies whether the views map json needs to be registered.
    views_map_url: str, Optional
        The url slug to be used to register the views map
    """

    def __init__(self,
                 mount_point=None, routes=None, allow_unknown_fields=False,
                 cache_handler=None, exception_handler=None,
                 tmp_folder_path="/tmp", permitted_operations=None,
                 forbidden_operations=None, celery_worker=None,
                 register_schema_definition=True, register_views_map=True,
                 schema_def_url='/schema-def', views_map_url='/views-map',
                 base_url=None
                 ):
        # Aggregated schema information for all registered models/views.
        self.schema_definition = {
            "models_registered_for_views": [],
            "model_schemas": {
            },
            "views": {
            }
        }
        self.routes = routes or {}
        # Back-fill url_slug/router on entities constructed without them, so
        # each entity knows where it is mounted.
        for url_slug, entity in self.routes.items():
            if entity.url_slug is None:
                entity.url_slug = url_slug
            if entity.router is None:
                entity.router = self
        self.allow_unknown_fields = allow_unknown_fields
        self.cache_handler = cache_handler
        self.exception_handler = exception_handler
        self.tmp_folder_path = tmp_folder_path
        self.permitted_operations = permitted_operations
        self.forbidden_operations = forbidden_operations
        self.celery_worker = celery_worker
        self.register_schema_definition = register_schema_definition
        self.register_views_map = register_views_map
        self.schema_def_url = schema_def_url
        self.views_map_url = views_map_url
        # NOTE(review): `base_url` is accepted but currently unused here --
        # confirm callers before removing it from the signature.
        self.initialize_registry_entry()
        # Mounting immediately registers all CRUD routes on the app/blueprint.
        if mount_point:
            self.mount_point = mount_point
            self.mount_on(self.mount_point)

    def route(self, url_slug, entity):
        """Attach `entity` to this router under `url_slug`."""
        self.routes[url_slug] = entity
        if entity.url_slug is None:
            entity.url_slug = url_slug
        if entity.router is None:
            entity.router = self

    def get_registry_entry(self):
        """Return the registry dict holding schemas and view metadata."""
        return self.registry

    def initialize_registry_entry(self):
        """Reset the registry to its empty initial structure."""
        self.registry = {
            "models_registered_for_views": [],
            "model_schemas": {
            },
            edk.OPERATION_MODIFIERS: {
            }
        }

    def mount_on(
            self, app_or_bp, allow_unknown_fields=None, cache_handler=None,
            exception_handler=None,
            tmp_folder_path=None, celery_worker=None,
            register_schema_definition=None, register_views_map=None,
            schema_def_url=None, views_map_url=None):
        """Mount the router on a Flask app or blueprint.

        Any keyword left as ``None`` falls back to the value given at
        construction time.
        """
        self.mount_point = app_or_bp
        # Booleans need explicit `is None` checks so that an intentional
        # `False` passed here is not overridden by the constructor default.
        if allow_unknown_fields is None:
            allow_unknown_fields = self.allow_unknown_fields
        if register_schema_definition is None:
            register_schema_definition = self.register_schema_definition
        if register_views_map is None:
            register_views_map = self.register_views_map
        self.register_crud_routes(
            allow_unknown_fields=allow_unknown_fields,
            cache_handler=cache_handler or self.cache_handler,
            exception_handler=exception_handler or self.exception_handler,
            tmp_folder_path=tmp_folder_path or self.tmp_folder_path,
            celery_worker=celery_worker or self.celery_worker,
            register_schema_definition=register_schema_definition,
            register_views_map=register_views_map,
            schema_def_url=schema_def_url or self.schema_def_url,
            views_map_url=views_map_url or self.views_map_url
        )

    def to_dict(self):
        """Return a mapping of entity name -> entity configuration dict."""
        entities_map = {}
        for url_slug, entity in self.routes.items():
            # Fall back to the model class *name* (not the class object) so
            # the resulting map stays JSON-serializable.
            entities_map[entity.name or entity.model_class.__name__] = entity.to_dict()
        return entities_map

    def register_crud_routes(
            self, allow_unknown_fields=False, cache_handler=None,
            exception_handler=None,
            tmp_folder_path="/tmp", celery_worker=None,
            register_schema_definition=True, register_views_map=True,
            schema_def_url='/schema-def', views_map_url='/views-map'):
        """Register index/get/post/put/patch/delete/batch-save views for every
        entity in ``self.routes`` on the current mount point, and optionally
        the schema-definition and views-map JSON endpoints.
        """
        app_or_bp = self.mount_point
        registry = self.get_registry_entry()
        model_schemas = registry["model_schemas"]

        def populate_model_schema(model_class, entity=None):
            # Register the input/output schemas of a model -- and, recursively,
            # of every model it is related to -- in the shared schema registry.
            model_key = fetch_nested_key(entity, 'name') or model_class.__name__
            if model_class._input_data_schema_:
                input_schema = deepcopy(model_class._input_data_schema_)
            else:
                input_schema = model_class.generate_input_data_schema()
            if entity and callable(entity.input_schema_modifier):
                input_schema = entity.input_schema_modifier(
                    input_schema)
            model_schemas[model_key] = {
                "input_schema": input_schema,
                "output_schema": model_class.output_data_schema(),
                "accepted_data_structure": model_class.max_permissible_dict_structure()
            }
            # Polymorphic subclasses are registered as references to the base.
            for subcls in all_subclasses(model_class):
                if subcls.__name__ not in model_schemas:
                    model_schemas[subcls.__name__] = {
                        'is_a_polymorphically_derived_from': model_class.__name__,
                        'polymorphic_identity': subcls.__mapper_args__['polymorphic_identity']
                    }
            for rel in model_class.__mapper__.relationships.values():
                if rel.mapper.class_.__name__ not in model_schemas:
                    populate_model_schema(rel.mapper.class_)

        for url_slug, entity in self.routes.items():
            _model = entity.model_class
            _model_name = entity.name
            base_url = url_slug
            # Entity-level defaults; each operation below may override them.
            default_query_constructor = entity.query_modifier
            default_access_checker = entity.access_checker
            default_exception_handler = entity.exception_handler or exception_handler
            default_dict_post_processors = entity.response_dict_modifiers
            default_id_attr = entity.id_attr
            dict_struct_for_model = entity.response_dict_struct
            fields_forbidden_from_being_set_for_all_views = entity.non_settable_fields or []
            fields_allowed_to_be_set_for_all_views = entity.settable_fields or []
            remove_relationship_keys_before_validation = entity.remove_relationship_keys_before_validation
            remove_assoc_proxy_keys_before_validation = entity.remove_assoc_proxy_keys_before_validation
            remove_property_keys_before_validation = entity.remove_property_keys_before_validation
            enable_caching = entity.enable_caching and cache_handler is not None
            cache_timeout = entity.cache_timeout
            endpoint_slug = entity.endpoint_slug or _model.__tablename__
            if _model_name not in registry["models_registered_for_views"]:
                registry["models_registered_for_views"].append(
                    _model_name)
            if _model_name not in model_schemas:
                populate_model_schema(entity.model_class, entity)
            if _model._input_data_schema_:
                model_default_input_schema = deepcopy(_model._input_data_schema_)
            else:
                model_default_input_schema = _model.generate_input_data_schema()
            if callable(entity.input_schema_modifier):
                model_default_input_schema = entity.input_schema_modifier(
                    model_default_input_schema)
            views = registry[edk.OPERATION_MODIFIERS]
            schemas_registry = {k: v.get('input_schema')
                                for k, v in list(model_schemas.items())}
            if _model_name not in views:
                views[_model_name] = {}
            if entity.index:
                index_op = entity.index
                # BUGFIX: keep caching overrides operation-local instead of
                # mutating the entity-level `enable_caching`/`cache_timeout`,
                # which used to leak an index-level override into the GET view.
                index_enable_caching = enable_caching
                if index_op.enable_caching is not None:
                    index_enable_caching = (
                        index_op.enable_caching and cache_handler is not None)
                cache_key_determiner = index_op.cache_key_determiner
                index_cache_timeout = index_op.cache_timeout or cache_timeout
                index_func = index_op.view_function or construct_index_view_function(
                    _model,
                    index_query_creator=index_op.query_modifier or default_query_constructor,
                    dict_struct=index_op.response_dict_struct or dict_struct_for_model,
                    custom_response_creator=index_op.custom_response_creator,
                    enable_caching=index_enable_caching,
                    cache_handler=cache_handler,
                    cache_key_determiner=cache_key_determiner,
                    cache_timeout=index_cache_timeout,
                    exception_handler=index_op.exception_handler or default_exception_handler,
                    access_checker=index_op.access_checker or default_access_checker,
                    default_limit=index_op.default_limit,
                    default_sort=index_op.default_sort,
                    default_orderby=index_op.default_orderby,
                    default_offset=index_op.default_offset,
                    default_page=index_op.default_page,
                    default_per_page=index_op.default_per_page
                )
                index_url = index_op.url or "/%s" % base_url
                app_or_bp.route(
                    index_url, methods=['GET'], endpoint='index_%s' % endpoint_slug)(
                    index_func)
                views[_model_name][edk.INDEX] = {edk.URL: index_url}
            if entity.get:
                get_op = entity.get
                # Operation-local caching settings (see note on the index view).
                get_enable_caching = enable_caching
                if get_op.enable_caching is not None:
                    get_enable_caching = (
                        get_op.enable_caching and cache_handler is not None)
                cache_key_determiner = get_op.cache_key_determiner
                get_cache_timeout = get_op.cache_timeout or cache_timeout
                get_func = get_op.view_function or construct_get_view_function(
                    _model,
                    permitted_object_getter=get_op.permitted_object_getter or entity.permitted_object_getter,
                    get_query_creator=get_op.query_modifier or default_query_constructor,
                    dict_struct=get_op.response_dict_struct or dict_struct_for_model,
                    enable_caching=get_enable_caching,
                    cache_handler=cache_handler, cache_key_determiner=cache_key_determiner,
                    cache_timeout=get_cache_timeout,
                    exception_handler=get_op.exception_handler or default_exception_handler,
                    access_checker=get_op.access_checker or default_access_checker,
                    id_attr_name=get_op.id_attr or default_id_attr,
                    dict_post_processors=get_op.response_dict_modifiers or default_dict_post_processors)
                get_url = get_op.url or '/%s/<_id>' % base_url
                app_or_bp.route(
                    get_url, methods=['GET'], endpoint='get_%s' % endpoint_slug)(
                    get_func)
                views[_model_name]['get'] = {edk.URL: get_url}
            if entity.post:
                post_op = entity.post
                if callable(post_op.input_schema_modifier):
                    post_input_schema = post_op.input_schema_modifier(
                        deepcopy(model_default_input_schema))
                else:
                    post_input_schema = model_default_input_schema
                post_func = post_op.view_function or construct_post_view_function(
                    _model, post_input_schema,
                    entities_group=self,
                    pre_processors=post_op.before_save,
                    post_processors=post_op.after_save,
                    schemas_registry=schemas_registry,
                    allow_unknown_fields=allow_unknown_fields,
                    dict_struct=post_op.response_dict_struct or dict_struct_for_model,
                    exception_handler=post_op.exception_handler or default_exception_handler,
                    access_checker=post_op.access_checker or default_access_checker,
                    remove_property_keys_before_validation=post_op.remove_property_keys_before_validation
                    if post_op.remove_property_keys_before_validation is not None
                    else remove_property_keys_before_validation,
                    remove_relationship_keys_before_validation=post_op.remove_relationship_keys_before_validation
                    if post_op.remove_relationship_keys_before_validation is not None
                    else remove_relationship_keys_before_validation,
                    remove_assoc_proxy_keys_before_validation=post_op.remove_assoc_proxy_keys_before_validation
                    if post_op.remove_assoc_proxy_keys_before_validation is not None
                    else remove_assoc_proxy_keys_before_validation,
                    fields_allowed_to_be_set=post_op.settable_fields or fields_allowed_to_be_set_for_all_views,
                    fields_forbidden_from_being_set=union([
                        fields_forbidden_from_being_set_for_all_views,
                        post_op.non_settable_fields or []
                    ]))
                post_url = post_op.url or "/%s" % base_url
                app_or_bp.route(
                    post_url, methods=['POST'], endpoint='post_%s' % endpoint_slug)(
                    post_func)
                views[_model_name]['post'] = {edk.URL: post_url}
                if callable(post_op.input_schema_modifier):
                    views[_model_name]['post']['input_schema'] = post_op.input_schema_modifier(
                        deepcopy(model_schemas[_model.__name__]['input_schema']))
            if entity.put:
                put_op = entity.put
                if callable(put_op.input_schema_modifier):
                    put_input_schema = put_op.input_schema_modifier(
                        deepcopy(model_default_input_schema))
                else:
                    put_input_schema = model_default_input_schema
                put_func = put_op.view_function or construct_put_view_function(
                    _model, put_input_schema,
                    entities_group=self,
                    permitted_object_getter=put_op.permitted_object_getter or entity.permitted_object_getter,
                    pre_processors=put_op.before_save,
                    post_processors=put_op.after_save,
                    dict_struct=put_op.response_dict_struct or dict_struct_for_model,
                    allow_unknown_fields=allow_unknown_fields,
                    query_constructor=put_op.query_modifier or default_query_constructor,
                    schemas_registry=schemas_registry,
                    exception_handler=put_op.exception_handler or default_exception_handler,
                    access_checker=put_op.access_checker or default_access_checker,
                    remove_property_keys_before_validation=put_op.remove_property_keys_before_validation
                    if put_op.remove_property_keys_before_validation is not None
                    else remove_property_keys_before_validation,
                    remove_relationship_keys_before_validation=put_op.remove_relationship_keys_before_validation
                    if put_op.remove_relationship_keys_before_validation is not None
                    else remove_relationship_keys_before_validation,
                    remove_assoc_proxy_keys_before_validation=put_op.remove_assoc_proxy_keys_before_validation
                    if put_op.remove_assoc_proxy_keys_before_validation is not None
                    else remove_assoc_proxy_keys_before_validation,
                    fields_allowed_to_be_set=put_op.settable_fields or fields_allowed_to_be_set_for_all_views,
                    fields_forbidden_from_being_set=union([
                        fields_forbidden_from_being_set_for_all_views,
                        put_op.non_settable_fields or []
                    ]))
                put_url = put_op.url or "/%s/<_id>" % base_url
                app_or_bp.route(
                    put_url, methods=['PUT'], endpoint='put_%s' % endpoint_slug)(
                    put_func)
                views[_model_name]['put'] = {edk.URL: put_url}
                if callable(put_op.input_schema_modifier):
                    views[_model_name]['put']['input_schema'] = put_op.input_schema_modifier(
                        deepcopy(model_schemas[_model.__name__]['input_schema']))
            if entity.patch:
                patch_op = entity.patch
                if callable(patch_op.input_schema_modifier):
                    patch_input_schema = patch_op.input_schema_modifier(
                        deepcopy(model_default_input_schema))
                else:
                    patch_input_schema = model_default_input_schema
                patch_func = patch_op.view_function or construct_patch_view_function(
                    _model, patch_input_schema,
                    pre_processors=patch_op.before_save,
                    commands=patch_op.commands,
                    post_processors=patch_op.after_save,
                    query_constructor=patch_op.query_modifier or default_query_constructor,
                    permitted_object_getter=patch_op.permitted_object_getter or entity.permitted_object_getter,
                    schemas_registry=schemas_registry,
                    exception_handler=patch_op.exception_handler or default_exception_handler,
                    access_checker=patch_op.access_checker or default_access_checker,
                    dict_struct=patch_op.response_dict_struct or dict_struct_for_model)
                patch_url = patch_op.url or "/%s/<_id>" % base_url
                app_or_bp.route(
                    patch_url, methods=['PATCH'], endpoint='patch_%s' % endpoint_slug)(
                    patch_func)
                views[_model_name]['patch'] = {edk.URL: patch_url}
                if callable(patch_op.input_schema_modifier):
                    views[_model_name]['patch']['input_schema'] = patch_op.input_schema_modifier(
                        deepcopy(model_schemas[_model.__name__]['input_schema']))
            if entity.delete:
                delete_op = entity.delete
                delete_func = delete_op.view_function or construct_delete_view_function(
                    _model,
                    query_constructor=delete_op.query_modifier or default_query_constructor,
                    pre_processors=delete_op.before_save,
                    permitted_object_getter=delete_op.permitted_object_getter or entity.permitted_object_getter,
                    post_processors=delete_op.after_save,
                    exception_handler=delete_op.exception_handler or default_exception_handler,
                    access_checker=delete_op.access_checker or default_access_checker)
                delete_url = delete_op.url or "/%s/<_id>" % base_url
                app_or_bp.route(
                    delete_url, methods=['DELETE'], endpoint='delete_%s' % endpoint_slug)(
                    delete_func)
                views[_model_name]['delete'] = {edk.URL: delete_url}
            if entity.batch_save:
                batch_save_op = entity.batch_save
                if callable(batch_save_op.input_schema_modifier):
                    batch_save_input_schema = batch_save_op.input_schema_modifier(
                        deepcopy(model_default_input_schema))
                else:
                    batch_save_input_schema = model_default_input_schema
                batch_save_func = batch_save_op.view_function or construct_batch_save_view_function(
                    _model, batch_save_input_schema,
                    app_or_bp=app_or_bp,
                    pre_processors_for_post=fetch_nested_key(entity, 'post.before_save'),
                    pre_processors_for_put=fetch_nested_key(entity, 'put.before_save'),
                    post_processors_for_post=fetch_nested_key(entity, 'post.after_save'),
                    # BUGFIX: was 'put.before_save' (copy-paste), which ran the
                    # PUT pre-processors as post-processors during batch save.
                    post_processors_for_put=fetch_nested_key(entity, 'put.after_save'),
                    extra_pre_processors=batch_save_op.extra_actions_before_save,
                    extra_post_processors=batch_save_op.extra_actions_after_save,
                    unique_identifier_fields=batch_save_op.unique_identifier_fields,
                    dict_struct=batch_save_op.response_dict_struct or dict_struct_for_model,
                    allow_unknown_fields=allow_unknown_fields,
                    query_constructor=batch_save_op.query_modifier or default_query_constructor,
                    schemas_registry=schemas_registry,
                    exception_handler=batch_save_op.exception_handler or default_exception_handler,
                    tmp_folder_path=tmp_folder_path,
                    fields_forbidden_from_being_set=union([
                        fields_forbidden_from_being_set_for_all_views,
                        batch_save_op.non_settable_fields or []
                    ]),
                    celery_worker=celery_worker,
                    result_saving_instance_model=batch_save_op.result_saving_instance_model,
                    result_saving_instance_getter=batch_save_op.result_saving_instance_getter,
                    run_as_async_task=batch_save_op.run_as_async_task,
                    update_only=batch_save_op.update_only, create_only=batch_save_op.create_only,
                    skip_pre_processors=batch_save_op.skip_pre_processors,
                    skip_post_processors=batch_save_op.skip_post_processors
                )
                batch_save_url = batch_save_op.url or "/batch-save/%s" % base_url
                app_or_bp.route(
                    batch_save_url, methods=['POST'], endpoint='batch_save_%s' % endpoint_slug)(
                    batch_save_func)
                views[_model_name]['batch_save'] = {edk.URL: batch_save_url}
                if callable(batch_save_op.input_schema_modifier):
                    views[_model_name]['batch_save']['input_schema'] = batch_save_op.input_schema_modifier(
                        deepcopy(model_schemas[_model.__name__]['input_schema']))
        if register_schema_definition:
            def schema_def():
                return Response(
                    json.dumps(
                        registry,
                        default=json_encoder, sort_keys=True),
                    200, mimetype='application/json')
            if cache_handler:
                # The schema rarely changes; cache the rendered JSON for a day.
                schema_def = cache_handler.cached(timeout=86400)(schema_def)
            app_or_bp.route(schema_def_url, methods=['GET'])(schema_def)
        if register_views_map:
            def views_map():
                return Response(
                    json.dumps(
                        registry[edk.OPERATION_MODIFIERS],
                        default=json_encoder, sort_keys=True),
                    200, mimetype='application/json')
            if cache_handler:
                views_map = cache_handler.cached(timeout=86400)(views_map)
            app_or_bp.route(views_map_url, methods=['GET'])(views_map)
/My-APScheduler-3.9.5.tar.gz/My-APScheduler-3.9.5/apscheduler/jobstores/mongodb.py | from __future__ import absolute_import
import warnings
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from bson.binary import Binary
from pymongo.errors import DuplicateKeyError
from pymongo import MongoClient, ASCENDING
except ImportError: # pragma: nocover
raise ImportError('MongoDBJobStore requires PyMongo installed')
class MongoDBJobStore(BaseJobStore):
    """
    Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to
    pymongo's `MongoClient
    <http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient>`_.

    Plugin alias: ``mongodb``

    :param str database: database to store jobs in
    :param str collection: collection to store jobs in
    :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of
        providing connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, database='apscheduler', collection='jobs', client=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(MongoDBJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not collection:
            raise ValueError('The "collection" parameter must not be empty')
        if client:
            self.client = maybe_ref(client)
        else:
            # Default to acknowledged writes unless the caller overrides it.
            connect_args.setdefault('w', 1)
            self.client = MongoClient(**connect_args)
        self.collection = self.client[database][collection]

    def start(self, scheduler, alias):
        """Index next_run_time once the store is attached to a scheduler."""
        super(MongoDBJobStore, self).start(scheduler, alias)
        self.collection.create_index('next_run_time', sparse=True)

    @property
    def connection(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn('The "connection" member is deprecated -- use "client" instead',
                      DeprecationWarning)
        return self.client

    def lookup_job(self, job_id):
        doc = self.collection.find_one(job_id, ['job_state'])
        if not doc:
            return None
        return self._reconstitute_job(doc['job_state'])

    def get_due_jobs(self, now):
        # Jobs whose stored UTC timestamp is at or before `now` are due.
        return self._get_jobs(
            {'next_run_time': {'$lte': datetime_to_utc_timestamp(now)}})

    def get_next_run_time(self):
        doc = self.collection.find_one({'next_run_time': {'$ne': None}},
                                       projection=['next_run_time'],
                                       sort=[('next_run_time', ASCENDING)])
        if not doc:
            return None
        return utc_timestamp_to_datetime(doc['next_run_time'])

    def get_all_jobs(self):
        all_jobs = self._get_jobs({})
        self._fix_paused_jobs_sorting(all_jobs)
        return all_jobs

    def add_job(self, job):
        job_doc = {
            '_id': job.id,
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)),
        }
        try:
            self.collection.insert_one(job_doc)
        except DuplicateKeyError:
            # The job id is the document _id, so a duplicate key means the id
            # is already taken.
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        new_values = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)),
        }
        result = self.collection.update_one({'_id': job.id}, {'$set': new_values})
        if result and result.matched_count == 0:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        result = self.collection.delete_one({'_id': job_id})
        if result and result.deleted_count == 0:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.collection.delete_many({})

    def shutdown(self):
        self.client.close()

    def _reconstitute_job(self, job_state):
        """Rebuild a Job object from its pickled state blob."""
        state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, conditions):
        jobs, failed_job_ids = [], []
        cursor = self.collection.find(conditions, ['_id', 'job_state'],
                                      sort=[('next_run_time', ASCENDING)])
        for document in cursor:
            try:
                jobs.append(self._reconstitute_job(document['job_state']))
            except BaseException:
                self._logger.exception('Unable to restore job "%s" -- removing it',
                                       document['_id'])
                failed_job_ids.append(document['_id'])
        # Remove all the jobs we failed to restore
        if failed_job_ids:
            self.collection.delete_many({'_id': {'$in': failed_job_ids}})
        return jobs

    def __repr__(self):
        return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
/NSoL-0.1.14.tar.gz/NSoL-0.1.14/nsol/primal_dual_solver.py |
import numpy as np
from nsol.solver import Solver
import pysitk.python_helper as ph
##
# First-order primal-dual algorithm for convex problems
# min_x [f(x) + alpha g(Bx)] with B being a continuous linear operator.
#
class PrimalDualSolver(Solver):
    """Chambolle-Pock first-order primal-dual solver for
    min_x [f(x) + alpha g(Bx)] with B a continuous linear operator.
    """

    ##
    # Store all essential variables
    # \date 2017-07-20 22:30:07+0100
    #
    # \param self The object
    # \param prox_f Proximal operator of f; prox_f: (x, tau) ->
    #                            prox_f(x, tau) = min_y [1/2 ||x-y||^2 + tau
    #                            f(y)]
    # \param prox_g_conj Proximal operator of g' with g' being the
    #                            conjugate of a convex, lower-semicontinuous
    #                            function g; typically g acts as regularizer like
    #                            TV or Huber; prox_g_conj: (x, sigma) ->
    #                            prox_g_conj(x, sigma) = min_y [1/2 ||x-y||^2 +
    #                            sigma g'(y)]
    # \param B Function associated to continuous linear
    #                            operator B
    # \param B_conj The conjugate of the continuous linear operator
    #                            B
    # \param L2 Squared operator norm of B, i.e.
    #                            L2 = ||B||^2 = ||\nabla||^2 = ||div||^2;
    #                            In 2D: L2 <= 8/h^2 (Chambolle, Pock, p.13)
    #                            In 3D: L2 <= 16/h^2 (similar proof)
    # \param x0 Initial value, 1D numpy data array
    # \param alpha Regularization parameter alpha > 0.
    # \param iterations Number of primal-dual iterations, int
    # \param x_scale Characteristic scale of each variable. Setting
    #                            x_scale is equivalent to reformulating the
    #                            problem in scaled variables ``xs = x / x_scale``
    # \param verbose Verbose output, bool
    # \param alg_type Type of algorithm to dynamically update
    #                            parameters for each iteration
    #
    def __init__(self,
                 prox_f,
                 prox_g_conj,
                 B,
                 B_conj,
                 L2,
                 x0,
                 alpha=0.01,
                 iterations=10,
                 x_scale=1.,
                 verbose=0,
                 alg_type="ALG2",
                 ):
        Solver.__init__(self, x0=x0, verbose=verbose, x_scale=x_scale)
        # proximal operator of f
        self._prox_f = prox_f
        # proximal operator of g'
        self._prox_g_conj = prox_g_conj
        # Continuous linear operator B in regularizer term g(Bx)
        self._B = B
        # Conjugate operator of B, i.e. B'
        self._B_conj = B_conj
        # Squared operator norm of B, i.e. L2 = ||B||^2
        self._L2 = float(L2)
        # Regularization parameter in f(x) + alpha g(Bx)
        self._alpha = float(alpha)
        # Number of primal-dual iterations
        self._iterations = iterations
        self._alg_type = alg_type
        # parameter initialization depend on chosen method
        # (dispatch table keyed by alg_type string)
        self._get_initial_tau_sigma = {
            "ALG2_AHMOD": self._get_initial_tau_sigma_alg2_ahmod,
            "ALG2": self._get_initial_tau_sigma_alg2,
            "ALG3": self._get_initial_tau_sigma_alg3,
        }
        # parameter updates depend on chosen method
        self._get_update_theta_tau_sigma = {
            "ALG2_AHMOD": self._get_update_theta_tau_sigma_alg2_ahmod,
            "ALG2": self._get_update_theta_tau_sigma_alg2,
            "ALG3": self._get_update_theta_tau_sigma_alg3,
        }

    ##
    # Sets the regularization parameter alpha.
    # \date 2017-08-04 18:54:24+0100
    #
    # \param self The object
    # \param alpha Regularization parameter; scalar
    #
    def set_alpha(self, alpha):
        self._alpha = alpha

    ##
    # Gets the regularization parameter alpha.
    # \date 2017-08-04 18:54:59+0100
    #
    # \param self The object
    #
    # \return scalar
    #
    def get_alpha(self):
        return self._alpha

    ##
    # Sets the squared operator norm of B, i.e. L2 = ||B||^2.
    # \date 2017-08-05 19:08:33+0100
    #
    # \param self The object
    # \param L2 scalar value > 0
    #
    def set_L2(self, L2):
        self._L2 = L2

    ##
    # Gets the squared operator norm of B, i.e. L2 = ||B||^2.
    # \date 2017-08-05 19:09:29+0100
    #
    # \param self The object
    #
    # \return Scalar value > 0
    #
    def get_L2(self):
        return self._L2

    ##
    # Sets the type of algorithm to dynamically update parameters for each
    # iteration.
    # \date 2017-08-05 19:16:11+0100
    #
    # \param self The object
    # \param alg_type string being either 'ALG2', 'ALG2_AHMOD' or 'ALG3'
    #
    def set_alg_type(self, alg_type):
        self._alg_type = alg_type

    ##
    # Gets the type of algorithm to dynamically update parameters for each
    # iteration.
    # \date 2017-08-05 19:17:26+0100
    #
    # \param self The object
    #
    # \return string.
    #
    def get_alg_type(self):
        return self._alg_type

    ##
    # Sets the number of primal-dual iterations.
    # \date 2017-08-05 18:59:12+0100
    #
    # \param self The object
    # \param iterations Number of primal-dual iterations, integer value
    #
    def set_iterations(self, iterations):
        self._iterations = iterations

    ##
    # Gets the number of primal-dual iterations.
    # \date 2017-08-05 18:59:39+0100
    #
    # \param self The object
    #
    # \return Number of primal-dual iterations, integer value.
    #
    def get_iterations(self):
        return self._iterations

    ##
    # Prints the statistics of the performed optimization
    # \date 2017-07-21 00:01:10+0100
    #
    # \param self The object
    # \param fmt Format for printing numerical values
    #
    def print_statistics(self, fmt="%.3e"):
        # NOTE(review): intentionally a no-op in this solver.
        pass

    ##
    # Execute solver
    # \date 2017-08-05 19:10:29+0100
    #
    # \param self The object
    #
    def _run(self):
        # Monitor output
        if self._observer is not None:
            self._observer.add_x(self.get_x())
        # regularization parameter lambda as used in Chambolle2011
        lmbda = 1. / self._alpha
        # Dynamic step sizes for primal and dual variable, see p.127
        # For ALG3 the third returned value is the constant relaxation
        # parameter theta (see _get_initial_tau_sigma_alg3), passed through
        # the `gamma` slot.
        tau_n, sigma_n, gamma = self._get_initial_tau_sigma[
            self._alg_type](L2=self._L2, lmbda=lmbda)
        x_n = np.array(self._x0)
        x_mean = np.array(self._x0)
        p_n = 0
        for i in range(0, self._iterations):
            # if self._verbose:
            #     ph.print_title("Primal-Dual iteration %d/%d" %
            #                    (i+1, self._iterations))
            # else:
            # NOTE(review): progress is printed unconditionally here;
            # self._verbose is not consulted (see commented-out branch above).
            ph.print_info("Primal-Dual iteration %d/%d" %
                          (i+1, self._iterations))
            # Update dual variable (uses the extrapolated iterate x_mean)
            p_n = self._prox_g_conj(
                p_n + sigma_n * self._B(x_mean), sigma_n)
            # Update primal variable
            x_np1 = self._prox_f(x_n - tau_n * self._B_conj(p_n), tau_n*lmbda)
            # Update parameter
            theta_n, tau_n, sigma_n = self._get_update_theta_tau_sigma[
                self._alg_type](self._L2, gamma, tau_n, sigma_n)
            # Update mean variable (over-relaxation / extrapolation step)
            x_mean = x_np1 + theta_n * (x_np1 - x_n)
            # Prepare for next iteration
            self._x = x_np1
            x_n = x_np1
            # Monitor output
            if self._observer is not None:
                self._observer.add_x(self.get_x())
        self._x = x_n

    ##
    # Gets the initial step sizes tau_0, sigma_0 and the Lipschitz parameter
    # gamma according to ALG2 method in Chambolle2011, p.133
    #
    # tau_0 and sigma_0 such that tau_0 * sigma_0 * L^2 = 1
    # \date 2017-07-18 17:57:33+0100
    #
    # \param self The object
    # \param L2 Squared operator norm
    # \param lmbda Regularization parameter
    #
    # \return tau0, sigma0, gamma
    #
    def _get_initial_tau_sigma_alg2(self, L2, lmbda):
        # Initial values according to ALG2 in Chambolle2011
        tau0 = 1. / np.sqrt(L2)
        sigma0 = 1. / (L2 * tau0)
        gamma = 0.35 * lmbda
        return tau0, sigma0, gamma

    ##
    # Gets the update of the variable relaxation parameter
    # \f$\theta_n\in[0,1]\f$ and the dynamic step sizes
    # \f$\tau_n,\,\sigma_n>0\f$ for the primal and dual variable, respectively.
    #
    # Update is performed according to ALG2 in Chambolle2011, p.133. It always
    # holds tau_n * sigma_n * L^2 = 1.
    # \date 2017-07-18 18:16:28+0100
    #
    # \param self The object
    # \param L2 Squared operator norm
    # \param gamma Lipschitz parameter
    # \param tau_n Dynamic step size for primal variable
    # \param sigma_n Dynamic step size for dual variable
    #
    # \return theta_n, tau_n, sigma_n update
    #
    def _get_update_theta_tau_sigma_alg2(self, L2, gamma, tau_n, sigma_n):
        theta_n = 1. / np.sqrt(1. + 2. * gamma * tau_n)
        tau_n = tau_n * theta_n
        sigma_n = sigma_n / theta_n
        return theta_n, tau_n, sigma_n

    ##
    # Gets the initial step sizes tau_0, sigma_0 and the Lipschitz parameter
    # gamma according to ALG3 method in Chambolle2011, p.136
    #
    # tau_0 and sigma_0 such that tau_0 * sigma_0 * L^2 = 1
    # \date 2017-07-18 17:57:33+0100
    #
    # \param self The object
    # \param L2 Squared operator norm
    # \param lmbda Regularization parameter
    #
    # \return tau0, sigma0, theta
    #
    def _get_initial_tau_sigma_alg3(self, L2, lmbda, huber_alpha=0.05):
        # Initial values according to ALG3 in Chambolle2011
        gamma = lmbda
        delta = huber_alpha
        mu = 2. * np.sqrt(gamma * delta / L2)
        # relaxation parameter in [1/(1+mu), 1]
        theta = 1. / (1. + mu)
        # step size dual variable
        sigma = mu / (2. * delta)
        # step size primal variable
        tau = mu / (2. * gamma)
        return tau, sigma, theta

    ##
    # Gets the update of the variable relaxation parameter
    # \f$\theta_n\in[0,1]\f$ and the dynamic step sizes
    # \f$\tau_n,\,\sigma_n>0\f$ for the primal and dual variable, respectively.
    #
    # Update is performed according to ALG3 in Chambolle2011, p.136. It always
    # holds tau_n * sigma_n * L^2 = 1.
    # \date 2017-07-18 18:16:28+0100
    #
    # \param self The object
    # \param L2 Squared operator norm
    # \param gamma Lipschitz parameter
    # \param tau_n Dynamic step size for primal variable
    # \param sigma_n Dynamic step size for dual variable
    #
    # \return theta_n, tau_n, sigma_n update
    #
    def _get_update_theta_tau_sigma_alg3(self, L2, gamma, tau_n, sigma_n):
        # ALG3 keeps all parameters constant; the `gamma` slot carries the
        # fixed relaxation parameter theta returned by
        # _get_initial_tau_sigma_alg3.
        theta_n = gamma
        return theta_n, tau_n, sigma_n

    ##
    # Gets the initial step sizes tau_0, sigma_0 and the Lipschitz parameter
    # gamma according to AHMOD, i.e. Arrow-Hurwicz method, in Chambolle2011,
    # p.133
    #
    # tau_0 and sigma_0 such that tau_0 * sigma_0 * L^2 = 4
    # \date 2017-07-18 17:56:36+0100
    #
    # \param self The object
    # \param L2 Squared operator norm
    # \param lmbda Regularization parameter
    #
    # \return tau0, sigma0, gamma
    #
    def _get_initial_tau_sigma_alg2_ahmod(self, L2, lmbda):
        # Initial values according to AHMOD in Chambolle2011
        tau0 = 0.02
        sigma0 = 4. / (L2 * tau0)
        gamma = 0.35 * lmbda
        return tau0, sigma0, gamma

    ##
    # Gets the update of the variable relaxation parameter
    # \f$\theta_n\in[0,1]\f$ and the dynamic step sizes
    # \f$\tau_n,\,\sigma_n>0\f$ for the primal and dual variable, respectively.
    #
    # Update is performed according to AHMOD, i.e. Arrow-Hurwicz method, in
    # Chambolle2011, p.133. It always holds tau_n * sigma_n * L^2 = 4.
    # \date 2017-07-18 18:16:28+0100
    #
    # \param self The object
    # \param L2 Squared operator norm
    # \param gamma Lipschitz parameter
    # \param tau_n Dynamic step size for primal variable
    # \param sigma_n Dynamic step size for dual variable
    #
    # \return theta_n, tau_n, sigma_n update
    #
    def _get_update_theta_tau_sigma_alg2_ahmod(self,
                                               L2, gamma, tau_n, sigma_n):
        # Step sizes evolve as in ALG2, but theta_n = 0 is returned, i.e. no
        # extrapolation is applied (Arrow-Hurwicz).
        theta_n = 1. / np.sqrt(1. + 2. * gamma * tau_n)
        tau_n = tau_n * theta_n
        sigma_n = sigma_n / theta_n
        return 0., tau_n, sigma_n
/GradAttack-0.1.2.tar.gz/GradAttack-0.1.2/gradattack/datamodules.py | import os
from typing import Optional
import numpy as np
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_lightning.core.datamodule import LightningDataModule
from torch.utils.data import Subset
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import Sampler
from torchvision.datasets.cifar import CIFAR10
from torchvision.datasets import MNIST
# Defaults shared by all data modules in this file.
DEFAULT_DATA_DIR = "./data"
DEFAULT_NUM_WORKERS = 32
# Augmentation pipeline for ImageNet-style images cropped down to 32x32
# (used as the default transform of FileDataModule).
TRANSFORM_IMAGENET = [
    transforms.Resize(40),
    transforms.RandomCrop(32),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
def train_val_split(dataset_size: int, val_train_split: float = 0.02):
    """Split ``range(dataset_size)`` into train/validation index ranges.

    The trailing ``val_train_split`` fraction of indices becomes the
    validation set; everything before the cut is the training set.

    Args:
        dataset_size: total number of samples.
        val_train_split: fraction of samples reserved for validation.

    Returns:
        Tuple of (train index range, validation index range).
    """
    cut = int((1 - val_train_split) * dataset_size)
    indices = range(dataset_size)
    return indices[:cut], indices[cut:]
def extract_attack_set(
    dataset: Dataset,
    sample_per_class: int = 5,
    multi_class=False,
    total_num_samples: int = 50,
    seed: int = None,
):
    """Select a small, class-balanced subset of ``dataset`` to attack.

    For single-label datasets, up to ``sample_per_class`` examples are
    collected per class, scanning either in index order or in a seeded
    random permutation. For multi-label datasets, the first
    ``total_num_samples`` indices are used without balancing.

    Args:
        dataset: dataset exposing ``classes`` and ``(input, label)`` items.
        sample_per_class: number of examples kept per class.
        multi_class: if True, skip per-class balancing.
        total_num_samples: subset size in the multi-label case.
        seed: optional seed producing a shuffled scan order.

    Returns:
        Tuple ``(selected_indices, class_to_indices)``;
        ``class_to_indices`` is None in the multi-label case.
    """
    if multi_class:
        return range(total_num_samples), None

    num_classes = len(dataset.classes)
    class2sample = {i: [] for i in range(num_classes)}
    select_indices = []
    if seed is None:  # fixed: was the non-idiomatic `seed == None`
        index_pool = range(len(dataset))
    else:
        index_pool = np.random.RandomState(seed=seed).permutation(
            len(dataset))
    quota = sample_per_class * num_classes
    for i in index_pool:
        current_class = dataset[i][1]
        if len(class2sample[current_class]) < sample_per_class:
            class2sample[current_class].append(i)
            select_indices.append(i)
            # Fixed: the quota check was only reached when a *full* class
            # was encountered; test it right after every addition so the
            # scan stops as soon as all classes are filled.
            if len(select_indices) == quota:
                break
    return select_indices, class2sample
class FileDataModule(LightningDataModule):
    """Data module serving a single ``ImageFolder`` directory; the same
    loader is returned for both training and testing (no split)."""
    def __init__(
        self,
        data_dir: str = DEFAULT_DATA_DIR,
        transform: torch.nn.Module = transforms.Compose(TRANSFORM_IMAGENET),
        batch_size: int = 32,
        num_workers: int = DEFAULT_NUM_WORKERS,
        batch_sampler: Sampler = None,
    ):
        # Fixed: LightningDataModule subclasses must run the base-class
        # initialiser, otherwise trainer integration breaks.
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.transform = transform
        self.batch_sampler = batch_sampler
    def setup(self, stage: Optional[str] = None):
        """Load the image-folder dataset from ``data_dir``."""
        self.dataset = datasets.ImageFolder(self.data_dir,
                                            transform=self.transform)
    def get_dataloader(self):
        """Return a loader over the full dataset."""
        return DataLoader(self.dataset,
                          batch_size=self.batch_size,
                          num_workers=self.num_workers)
    def train_dataloader(self):
        return self.get_dataloader()
    def test_dataloader(self):
        return self.get_dataloader()
class ImageNetDataModule(LightningDataModule):
    """Lightning data module for ImageNet with configurable augmentation
    and an 'attack' stage that selects a balanced training subset."""
    def __init__(
        self,
        augment: dict = None,
        data_dir: str = os.path.join(DEFAULT_DATA_DIR, "imagenet"),
        batch_size: int = 32,
        num_workers: int = DEFAULT_NUM_WORKERS,
        batch_sampler: Sampler = None,
        tune_on_val: bool = False,
    ):
        # Fixed: LightningDataModule subclasses must run the base-class
        # initialiser (the sibling MNIST/CIFAR modules already do).
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.num_classes = 1000
        self.multi_class = False
        self.batch_sampler = batch_sampler
        # NOTE(review): annotated as bool but passed to train_val_split as a
        # split *fraction* (float elsewhere in this file) -- confirm intent.
        self.tune_on_val = tune_on_val
        print(data_dir)
        imagenet_normalize = transforms.Normalize((0.485, 0.456, 0.406),
                                                  (0.229, 0.224, 0.225))
        self._train_transforms = [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            imagenet_normalize,
        ]
        # Optional augmentations are prepended so they operate on PIL
        # images before ToTensor/normalisation.
        if augment["hflip"]:
            self._train_transforms.insert(
                0, transforms.RandomHorizontalFlip(p=0.5))
        if augment["color_jitter"] is not None:
            self._train_transforms.insert(
                0,
                transforms.ColorJitter(
                    brightness=augment["color_jitter"][0],
                    contrast=augment["color_jitter"][1],
                    saturation=augment["color_jitter"][2],
                    hue=augment["color_jitter"][3],
                ),
            )
        if augment["rotation"] > 0:
            self._train_transforms.insert(
                0, transforms.RandomRotation(augment["rotation"]))
        if augment["crop"]:
            self._train_transforms.insert(0,
                                          transforms.RandomCrop(32, padding=4))
        print(self._train_transforms)
        self._test_transforms = [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            imagenet_normalize,
        ]
    def setup(self, stage: Optional[str] = None):
        """Initialize the dataset based on the stage option ('fit', 'test' or 'attack'):
        - if stage is 'fit', set up the training and validation dataset;
        - if stage is 'test', set up the testing dataset;
        - if stage is 'attack', set up the attack dataset (a subset of training images)
        Args:
            stage (Optional[str], optional): stage option. Defaults to None.
        """
        if stage == "fit" or stage is None:
            self.train_set = datasets.ImageFolder(
                os.path.join(self.data_dir, "train"),
                transform=transforms.Compose(self._train_transforms),
            )
            if self.tune_on_val:
                self.val_set = datasets.ImageFolder(
                    os.path.join(self.data_dir, "train"),
                    transform=transforms.Compose(self._test_transforms),
                )
                train_indices, val_indices = train_val_split(
                    len(self.train_set), self.tune_on_val)
                self.train_set = Subset(self.train_set, train_indices)
                self.val_set = Subset(self.val_set, val_indices)
            else:  # use test set
                self.val_set = datasets.ImageFolder(
                    os.path.join(self.data_dir, "val"),
                    transform=transforms.Compose(self._test_transforms),
                )
        # Assign test dataset for use in dataloader(s)
        if stage == "test" or stage is None:
            self.test_set = datasets.ImageFolder(
                os.path.join(self.data_dir, "val"),
                transform=transforms.Compose(self._test_transforms),
            )
        if stage == "attack":
            ori_train_set = datasets.ImageFolder(
                os.path.join(self.data_dir, "attack"),
                transform=transforms.Compose(self._train_transforms),
            )
            self.attack_indices, self.class2attacksample = extract_attack_set(
                ori_train_set)
            self.train_set = Subset(ori_train_set, self.attack_indices)
    def train_dataloader(self):
        if self.batch_sampler is None:
            return DataLoader(self.train_set,
                              batch_size=self.batch_size,
                              num_workers=self.num_workers)
        else:
            return DataLoader(
                self.train_set,
                batch_sampler=self.batch_sampler,
                num_workers=self.num_workers,
            )
    def val_dataloader(self):
        return DataLoader(self.val_set,
                          batch_size=self.batch_size,
                          num_workers=self.num_workers)
    def test_dataloader(self):
        return DataLoader(self.test_set,
                          batch_size=self.batch_size,
                          num_workers=self.num_workers)
class MNISTDataModule(LightningDataModule):
    """Lightning data module for MNIST, resized to 32x32 and replicated to
    three channels so CIFAR-style models can consume it."""
    def __init__(
        self,
        augment: dict = None,
        batch_size: int = 32,
        data_dir: str = DEFAULT_DATA_DIR,
        num_workers: int = DEFAULT_NUM_WORKERS,
        batch_sampler: Sampler = None,
        tune_on_val: float = 0,
    ):
        super().__init__()
        self._has_setup_attack = False
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.dims = (3, 32, 32)
        self.num_classes = 10
        self.batch_sampler = batch_sampler
        self.tune_on_val = tune_on_val
        self.multi_class = False
        mnist_normalize = transforms.Normalize((0.1307, ), (0.3081, ))
        self._train_transforms = [
            transforms.Resize(32),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            mnist_normalize,
        ]
        # Optional augmentations are prepended so they operate on PIL
        # images before ToTensor/normalisation.
        if augment["hflip"]:
            self._train_transforms.insert(
                0, transforms.RandomHorizontalFlip(p=0.5))
        if augment["color_jitter"] is not None:
            self._train_transforms.insert(
                0,
                transforms.ColorJitter(
                    brightness=augment["color_jitter"][0],
                    contrast=augment["color_jitter"][1],
                    saturation=augment["color_jitter"][2],
                    hue=augment["color_jitter"][3],
                ),
            )
        if augment["rotation"] > 0:
            self._train_transforms.insert(
                0, transforms.RandomRotation(augment["rotation"]))
        if augment["crop"]:
            self._train_transforms.insert(0,
                                          transforms.RandomCrop(32, padding=4))
        print(self._train_transforms)
        self._test_transforms = [
            transforms.Resize(32),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            mnist_normalize,
        ]
        self.prepare_data()
    def prepare_data(self):
        """Download the MNIST train/test splits if not already present."""
        MNIST(self.data_dir, train=True, download=True)
        MNIST(self.data_dir, train=False, download=True)
    def setup(self, stage: Optional[str] = None):
        """Initialize the dataset based on the stage option ('fit', 'test' or 'attack'):
        - if stage is 'fit', set up the training and validation dataset;
        - if stage is 'test', set up the testing dataset;
        - if stage is 'attack', set up the attack dataset (a subset of training images)
        Args:
            stage (Optional[str], optional): stage option. Defaults to None.
        """
        if stage == "fit" or stage is None:
            self.train_set = MNIST(
                self.data_dir,
                train=True,
                transform=transforms.Compose(self._train_transforms),
            )
            if self.tune_on_val:
                self.val_set = MNIST(
                    self.data_dir,
                    train=True,
                    transform=transforms.Compose(self._test_transforms),
                )
                train_indices, val_indices = train_val_split(
                    len(self.train_set), self.tune_on_val)
                self.train_set = Subset(self.train_set, train_indices)
                self.val_set = Subset(self.val_set, val_indices)
            else:
                self.val_set = MNIST(
                    self.data_dir,
                    train=False,
                    transform=transforms.Compose(self._test_transforms),
                )
        # Assign test dataset for use in dataloader(s)
        if stage == "test" or stage is None:
            self.test_set = MNIST(
                self.data_dir,
                train=False,
                transform=transforms.Compose(self._test_transforms),
            )
        # NOTE(review): the attack stages below reuse self.test_set, so
        # setup("test") must have run first -- confirm callers do this.
        if stage == "attack":
            ori_train_set = MNIST(
                self.data_dir,
                train=True,
                transform=transforms.Compose(self._train_transforms),
            )
            self.attack_indices, self.class2attacksample = extract_attack_set(
                ori_train_set)
            self.train_set = Subset(ori_train_set, self.attack_indices)
            self.test_set = Subset(self.test_set, range(100))
        elif stage == "attack_mini":
            ori_train_set = MNIST(
                self.data_dir,
                train=True,
                transform=transforms.Compose(self._train_transforms),
            )
            self.attack_indices, self.class2attacksample = extract_attack_set(
                ori_train_set, sample_per_class=2)
            self.train_set = Subset(ori_train_set, self.attack_indices)
            self.test_set = Subset(self.test_set, range(100))
        elif stage == "attack_large":
            ori_train_set = MNIST(
                self.data_dir,
                train=True,
                transform=transforms.Compose(self._train_transforms),
            )
            self.attack_indices, self.class2attacksample = extract_attack_set(
                ori_train_set, sample_per_class=500)
            self.train_set = Subset(ori_train_set, self.attack_indices)
            self.test_set = Subset(self.test_set, range(100))
    def train_dataloader(self):
        if self.batch_sampler is None:
            return DataLoader(
                self.train_set,
                batch_size=self.batch_size,
                num_workers=self.num_workers,
                shuffle=True,
            )
        else:
            # Fixed: DataLoader forbids `shuffle` together with
            # `batch_sampler` (the sampler controls ordering); passing both
            # raised a ValueError at runtime.
            return DataLoader(
                self.train_set,
                batch_sampler=self.batch_sampler,
                num_workers=self.num_workers,
            )
    def val_dataloader(self):
        return DataLoader(self.val_set,
                          batch_size=self.batch_size,
                          num_workers=self.num_workers)
    def test_dataloader(self):
        return DataLoader(self.test_set,
                          batch_size=self.batch_size,
                          num_workers=self.num_workers)
class CIFAR10DataModule(LightningDataModule):
    """Lightning data module for CIFAR-10 with configurable augmentation
    and attack-subset stages ('attack', 'attack_mini', 'attack_large')."""
    def __init__(
        self,
        augment: dict = None,
        batch_size: int = 32,
        data_dir: str = DEFAULT_DATA_DIR,
        num_workers: int = DEFAULT_NUM_WORKERS,
        batch_sampler: Sampler = None,
        tune_on_val: float = 0,
        seed: int = None,
    ):
        super().__init__()
        self._has_setup_attack = False
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.dims = (3, 32, 32)
        self.num_classes = 10
        self.batch_sampler = batch_sampler
        # Fraction of the training split reserved for validation (0 = use
        # the official test split for validation instead).
        self.tune_on_val = tune_on_val
        self.multi_class = False
        # Seed forwarded to extract_attack_set for the 'attack' stage.
        self.seed = seed
        cifar_normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                               (0.2023, 0.1994, 0.2010))
        self._train_transforms = [transforms.ToTensor(), cifar_normalize]
        # Optional augmentations are prepended so they operate on PIL
        # images before ToTensor/normalisation.
        if augment["hflip"]:
            self._train_transforms.insert(
                0, transforms.RandomHorizontalFlip(p=0.5))
        if augment["color_jitter"] is not None:
            self._train_transforms.insert(
                0,
                transforms.ColorJitter(
                    brightness=augment["color_jitter"][0],
                    contrast=augment["color_jitter"][1],
                    saturation=augment["color_jitter"][2],
                    hue=augment["color_jitter"][3],
                ),
            )
        if augment["rotation"] > 0:
            self._train_transforms.insert(
                0, transforms.RandomRotation(augment["rotation"]))
        if augment["crop"]:
            self._train_transforms.insert(0,
                                          transforms.RandomCrop(32, padding=4))
        print(self._train_transforms)
        self._test_transforms = [transforms.ToTensor(), cifar_normalize]
        self.prepare_data()
    def prepare_data(self):
        """Download the data"""
        CIFAR10(self.data_dir, train=True, download=True)
        CIFAR10(self.data_dir, train=False, download=True)
    def setup(self, stage: Optional[str] = None):
        """Initialize the dataset based on the stage option ('fit', 'test' or 'attack'):
        - if stage is 'fit', set up the training and validation dataset;
        - if stage is 'test', set up the testing dataset;
        - if stage is 'attack', set up the attack dataset (a subset of training images)
        Args:
            stage (Optional[str], optional): stage option. Defaults to None.
        """
        if stage == "fit" or stage is None:
            self.train_set = CIFAR10(
                self.data_dir,
                train=True,
                transform=transforms.Compose(self._train_transforms),
            )
            if self.tune_on_val:
                self.val_set = CIFAR10(
                    self.data_dir,
                    train=True,
                    transform=transforms.Compose(self._test_transforms),
                )
                train_indices, val_indices = train_val_split(
                    len(self.train_set), self.tune_on_val)
                self.train_set = Subset(self.train_set, train_indices)
                self.val_set = Subset(self.val_set, val_indices)
            else:
                self.val_set = CIFAR10(
                    self.data_dir,
                    train=False,
                    transform=transforms.Compose(self._test_transforms),
                )
        # Assign test dataset for use in dataloader(s)
        if stage == "test" or stage is None:
            self.test_set = CIFAR10(
                self.data_dir,
                train=False,
                transform=transforms.Compose(self._test_transforms),
            )
        # NOTE(review): the attack stages reuse self.test_set, so
        # setup("test") must have run first -- confirm callers do this.
        if stage == "attack":
            ori_train_set = CIFAR10(
                self.data_dir,
                train=True,
                transform=transforms.Compose(self._train_transforms),
            )
            self.attack_indices, self.class2attacksample = extract_attack_set(
                ori_train_set, seed=self.seed)
            self.train_set = Subset(ori_train_set, self.attack_indices)
            self.test_set = Subset(self.test_set, range(100))
        elif stage == "attack_mini":
            ori_train_set = CIFAR10(
                self.data_dir,
                train=True,
                transform=transforms.Compose(self._train_transforms),
            )
            self.attack_indices, self.class2attacksample = extract_attack_set(
                ori_train_set, sample_per_class=2)
            self.train_set = Subset(ori_train_set, self.attack_indices)
            self.test_set = Subset(self.test_set, range(100))
        elif stage == "attack_large":
            ori_train_set = CIFAR10(
                self.data_dir,
                train=True,
                transform=transforms.Compose(self._train_transforms),
            )
            self.attack_indices, self.class2attacksample = extract_attack_set(
                ori_train_set, sample_per_class=500)
            self.train_set = Subset(ori_train_set, self.attack_indices)
            self.test_set = Subset(self.test_set, range(100))
    def train_dataloader(self):
        if self.batch_sampler is None:
            return DataLoader(self.train_set,
                              batch_size=self.batch_size,
                              num_workers=self.num_workers)
        else:
            return DataLoader(
                self.train_set,
                batch_sampler=self.batch_sampler,
                num_workers=self.num_workers,
            )
    def val_dataloader(self):
        return DataLoader(self.val_set,
                          batch_size=self.batch_size,
                          num_workers=self.num_workers)
    def test_dataloader(self):
        return DataLoader(self.test_set,
                          batch_size=self.batch_size,
                          num_workers=self.num_workers)
/Cola-0.1.0b0.tar.gz/Cola-0.1.0b0/cola/core/counter.py | import threading
class Aggregator(object):
    """Strategy interface describing how values accumulate in a Counter."""
    def create_combiner(self, val):
        """Build the initial combiner from a first value."""
        raise NotImplementedError
    def merge_combiner(self, combiner1, combiner2):
        """Merge two combiners into one."""
        raise NotImplementedError
    def merge_val(self, combiner, val):
        """Fold a single value into an existing combiner."""
        raise NotImplementedError
class AddAggregator(Aggregator):
    """Aggregator that sums values with ``+``."""
    def create_combiner(self, val):
        """The first value is the running total itself."""
        return val
    def merge_combiner(self, combiner1, combiner2):
        return combiner1 + combiner2
    def merge_val(self, combiner, val):
        return combiner + val
class MergeAggregator(Aggregator):
    """Aggregator that collects all values into a list (duplicates kept)."""
    def create_combiner(self, val):
        return [val, ]
    def merge_combiner(self, combiner1, combiner2):
        # Mutates and returns the first list.
        combiner1.extend(combiner2)
        return combiner1
    def merge_val(self, combiner, val):
        combiner.append(val)
        return combiner
class UniqAggregator(Aggregator):
    """Aggregator that keeps the set of distinct values."""
    def create_combiner(self, val):
        # NOTE(review): set(val) iterates the first value (a string becomes
        # its characters), while merge_val adds values as single elements --
        # confirm that initial values are meant to be iterables.
        return set(val)
    def merge_combiner(self, combiner1, combiner2):
        # In-place union; mutates and returns the first set.
        combiner1 |= combiner2
        return combiner1
    def merge_val(self, combiner, val):
        combiner.add(val)
        return combiner
class OverwriteAggregator(Aggregator):
    """Aggregator where the most recent value always wins."""
    def create_combiner(self, val):
        return val
    def merge_combiner(self, combiner1, combiner2):
        # The incoming (second) combiner replaces the existing one.
        return combiner2
    def merge_val(self, combiner, val):
        return val
class Counter(object):
    """Thread-safe two-level counter: group -> item -> combiner.

    How values accumulate is delegated to an :class:`Aggregator`
    (summing by default).
    """
    def __init__(self, agg=None, container=None):
        # Fixed: the default was `agg=AddAggregator()`, a mutable default
        # argument evaluated once at import time and shared by every
        # Counter; create the default aggregator lazily instead.
        self.container = container if container is not None else dict()
        self.agg = agg if agg is not None else AddAggregator()
        self.lock = threading.Lock()
    def inc(self, group, item, val=1):
        """Fold ``val`` into the combiner stored under (group, item)."""
        with self.lock:
            group_map = self.container.setdefault(group, {})
            if item not in group_map:
                group_map[item] = self.agg.create_combiner(val)
            else:
                group_map[item] = self.agg.merge_val(group_map[item], val)
    def get(self, group, item, default_val=None):
        """Return the combiner for (group, item), or ``default_val``."""
        if group not in self.container:
            return default_val
        return self.container[group].get(item, default_val)
    def merge(self, other_counter):
        """Merge another counter built with the same aggregator class."""
        if self.agg.__class__ != other_counter.agg.__class__:
            raise ValueError('merged counter must have the same aggregator class')
        with self.lock:
            # Fixed: `iteritems` is Python-2 only; `items` works on both
            # Python 2 and 3.
            for group, kv in other_counter.container.items():
                group_map = self.container.setdefault(group, {})
                for item, val in kv.items():
                    if item not in group_map:
                        group_map[item] = val
                    else:
                        group_map[item] = self.agg.merge_combiner(
                            group_map[item], val)
    def reset(self, container=None):
        """Replace the backing container (empty dict by default)."""
        with self.lock:
            self.container = container if container is not None else dict()
/ChadBot6-0.1-py3-none-any.whl/ChadBot/core/FAQ.py | from typing import List, Union, Dict, Tuple
import os
import numpy as np
from .exceptions import *
from .generation import GenerateManager
import pickle
def save_dict(obj, path):
    """Pickle ``obj`` to ``path`` using the highest available protocol."""
    with open(path, 'wb') as fh:
        pickle.dump(obj, fh, pickle.HIGHEST_PROTOCOL)
def load_dict(path):
    """Load and return a previously pickled object from ``path``."""
    with open(path, 'rb') as fh:
        return pickle.load(fh)
class Sentence:
    """A plain piece of text; base class for questions and answers."""

    def __init__(self, text: str):
        self.text = text
class Question(Sentence):
    """A question carrying its class label and any original paraphrases."""

    def __init__(self, text: str, label: int, orignalParaPhrases: List[str] = None):
        super().__init__(text)
        self.label = label
        # Falsy input (None or empty list) yields a fresh list.
        self.orignalParaPhrases = orignalParaPhrases or []

    def __str__(self) -> str:
        return "{} {}".format(self.text, self.label)
class Answer(Sentence):
    """An answer carrying the label that links it to its question."""

    def __init__(self, text: str, label: int):
        super().__init__(text)
        self.label = label

    def __str__(self) -> str:
        return "{} {}".format(self.text, self.label)
class FAQUnit:
    """One question/answer pairing in the flattened FAQ.

    ``id`` must be unique across all units (generated or original).
    ``orignal`` holds the source question when ``question`` is a generated
    paraphrase; otherwise it equals ``question``. ``answer`` is the answer
    to the question.
    """

    def __init__(self, id: int, question: Question, orignal: Question, answer: Answer):
        assert question.label == orignal.label, "label for orignal and current question must be same"
        self.id = id
        self.question = question
        self.orignal = orignal
        self.answer = answer
        self.vectorRep = None  # embedding vector, assigned later by a model
        self.label = question.label

    def hasAssignedVector(self):
        """True once a vector representation has been assigned."""
        return self.vectorRep is not None

    def __str__(self) -> str:
        return "Question ---> {}\nAnswer --->{}\n".format(self.question, self.answer)

    def __repr__(self) -> str:
        return self.__str__()
def processRaw(questions: List[str], answers: List[str]) -> Tuple[List[Question], List[Answer]]:
    """Turn parallel question/answer string lists into labelled objects.

    Answers sharing the same text share one label; additional questions
    that map to an already-seen answer are stored as original paraphrases
    of the first question with that label (multiple questions per answer
    are allowed but not recommended).
    """
    assert len(questions) == len(answers)
    assert len(questions) == len(set(questions)), "Duplicate questions not allowed"
    # Assign one label per distinct answer text, in order of first occurrence.
    answer_to_label = {}
    for answer in answers:
        if answer not in answer_to_label:
            answer_to_label[answer] = len(answer_to_label)
    # Group questions by label; later questions become paraphrases of the first.
    label_to_question: Dict[int, Question] = {}
    for question, answer in zip(questions, answers):
        label = answer_to_label[answer]
        if label in label_to_question:
            label_to_question[label].orignalParaPhrases.append(question)
        else:
            label_to_question[label] = Question(label=label, text=question)
    out_answers = [Answer(label=label, text=answer)
                   for answer, label in answer_to_label.items()]
    return list(label_to_question.values()), out_answers
class FAQ:
    """A named FAQ: labelled questions/answers plus a flattened, optionally
    paraphrase-augmented and vectorised list of :class:`FAQUnit` entries."""
    def __init__(self, name: str, questions: List[str] = None, answers: List[str] = None):
        self.name = name
        self.questions: List[Question] = None
        self.answers: List[Answer] = None
        self.l2Q: Dict[int, Question] = dict()
        self.l2A: Dict[int, Answer] = dict()
        self.FAQ: List[FAQUnit] = None
        if not (questions is None or answers is None):
            assert len(questions) == len(answers)
            self.questions, self.answers = processRaw(questions=questions, answers=answers)
            self._runChecks()
            self._buildDicts()
    #########################################################################
    def _runChecks(self):
        """
        Running checks on questions and answers
        might add other checks like min/max sentence length etc
        """
        includedSet = set()
        for question in self.questions:
            if question.label in includedSet:
                raise WrongInputDataException("(ONLY FOR FAQ CLASS ERROR)Label for each question must be unique, found atleast 2 questions with same label")
            includedSet.add(question.label)
        answerlabelsSet = set()
        for answer in self.answers:
            if answer.label not in includedSet:
                raise WrongInputDataException("Label {} for answer {} does not match any question label , please remove this answer from all answer as it will never be answered anyway !!!".format(answer.label, answer.text))
            if answer.label in answerlabelsSet:
                raise WrongInputDataException("Label for each answer must be unique (the text might be the same !!) but found at least two answers with label {}".format(answer.label))
            answerlabelsSet.add(answer.label)
        # checking if any label in question points to no answer at all !!!
        differenceSet = includedSet - answerlabelsSet
        if len(differenceSet) > 0:
            raise WrongInputDataException("The following questions labels have no corrosponding answer labels {} , please check the input data".format(differenceSet))
    def _buildDicts(self):
        """
        To build label to question/answers mappings
        """
        if self.isEmpty():
            raise AttemptedUsingEmptyFAQ()
        for question in self.questions:
            self.l2Q[question.label] = question
        for answer in self.answers:
            self.l2A[answer.label] = answer
    def getAnswerWithLabel(self, label: int) -> Answer:
        """
        Returns answer for the particular label
        """
        if self.isEmpty():
            raise AttemptedUsingEmptyFAQ()
        if label not in self.l2A:
            raise LabelDoesNotExists("The answer with label {} does not exists".format(label))
        return self.l2A[label]
    def getQuestionWithLabel(self, label: int) -> Question:
        """
        Returns all orignal questions for the particular label
        """
        if self.isEmpty():
            raise AttemptedUsingEmptyFAQ()
        if label not in self.l2Q:
            raise LabelDoesNotExists("The question with label {} does not exists".format(label))
        return self.l2Q[label]
    def _paraphrase(self, generator: GenerateManager):
        """
        Takes the questions and answers and generated more question using the generator
        given , the number of questions to generate and other settings are to be applied in the
        generator itself
        Populated the self.FAQ property
        """
        if self.isEmpty():
            raise AttemptedUsingEmptyFAQ()
        questionTexts = list(map(lambda q: q.text, self.questions))
        generatedQuestions = generator.generate(questionTexts)
        if len(generatedQuestions) != len(self.questions):
            raise WrongGeneratorOutputException("number of orignal questions is {} but generator returned {} Lists !!!".format(len(self.questions), len(generatedQuestions)))
        tempFAQ = []
        idCount = 0
        for orignalQuestion, paraphrases in zip(self.questions, generatedQuestions):
            label = orignalQuestion.label
            answer = self.getAnswerWithLabel(label)
            # The original question itself is its own "orignal".
            tempFAQ.append(FAQUnit(idCount, orignalQuestion, orignalQuestion, answer=answer))
            idCount += 1
            for parphrase in paraphrases:
                question = Question(label=label, text=parphrase)
                tempFAQ.append(FAQUnit(id=idCount, question=question, orignal=orignalQuestion, answer=answer))
                idCount += 1
            for orignalParaphrase in orignalQuestion.orignalParaPhrases:
                question = Question(label=label, text=orignalParaphrase)
                tempFAQ.append(FAQUnit(id=idCount, question=question, orignal=orignalQuestion, answer=answer))
                idCount += 1
        self.FAQ = tempFAQ
    def _assignVectors(self, model):
        """
        Using the model to assign vectors to each generated question
        """
        if self.isEmpty():
            raise AttemptedUsingEmptyFAQ()
        if not self.isUsable():
            raise AttemptedUsingNonUsableFAQ("Must generate paraphrases before assigning vectors")
        questions = [q.question.text for q in self.FAQ]
        vectors: List[np.ndarray] = model.encode(questions)
        if len(vectors) != len(questions):
            # Fixed: this previously called len(questions, len(vectors)),
            # which raised a TypeError while building the error message.
            raise WrongModelOutputException(
                "The size of list of sentences input to model and that of output do not match , {} != {}".format(
                    len(questions), len(vectors)))
        for i, vector in enumerate(vectors):
            self.FAQ[i].vectorRep = vector
    def buildFAQ(self, generator: GenerateManager, model=None):
        """
        Will generate questions , and then the vectorrep of each question,
        WILL NOT SAVE , MUST CALL SAVE
        """
        if self.isEmpty():
            raise AttemptedUsingEmptyFAQ()
        self._paraphrase(generator=generator)
        if model is not None:
            self._assignVectors(model=model)
    def isEmpty(self):
        """True when no questions or answers have been loaded."""
        return (self.questions is None or self.answers is None
                or len(self.questions) == 0 or len(self.answers) == 0)
    def isUsable(self):
        """True once paraphrases have been generated into self.FAQ."""
        if self.isEmpty():
            return False
        return not (self.FAQ is None or len(self.FAQ) == 0)
    def hasVectorsAssigned(self):
        """True when every FAQ unit carries a vector representation."""
        if not self.isUsable():
            return False
        return all(unit.hasAssignedVector() for unit in self.FAQ)
    def load(self, rootDirPath):
        """Load a previously saved FAQ with this name from rootDirPath."""
        newObj = load_dict(os.path.join(rootDirPath, self.name + ".pkl"))
        # replacing all the vars
        self.__dict__.update(newObj.__dict__)
    def save(self, rootDirPath):
        """Pickle this FAQ to <rootDirPath>/<name>.pkl."""
        save_dict(self, os.path.join(rootDirPath, self.name + ".pkl"))
    def resetAssignedVectors(self, rootDirPath):
        """Clear all assigned vectors and persist the change."""
        if self.FAQ is None:
            return
        for unit in self.FAQ:
            unit.vectorRep = None
        self.save(rootDirPath)
    def resetFAQ(self, rootDirPath):
        """Drop generated units and label mappings, then persist."""
        self.FAQ = None
        self.l2A = dict()
        self.l2Q = dict()
        self.save(rootDirPath)
    def __len__(self):
        if self.isEmpty():
            raise AttemptedUsingEmptyFAQ()
        if not self.isUsable():
            raise ValueError("cannot access length of a faq whose questions have not been generated !!! MUST CALL BUILD FAQ BEFORE THIS, OR LOAD FAQ FROM PREEXISTING SOURCE")
        return len(self.FAQ)
class FAQOutput:
    """Result of an FAQ lookup.

    ``answer`` is None when the question fell outside the known set.
    """
    def __init__(self, answer: Union[Answer, None], question: Question, faqName: str, faqId: int, score: float, similarQuestions: List[str], maxScore: float):
        self.answer = answer
        # Fixed: the `question` argument was accepted but never stored.
        self.question = question
        self.faqId = faqId
        self.faqName = faqName
        self.score = score
        self.similarQuestions = similarQuestions
        self.maxScore = maxScore
/Flask-Docker-0.2.0.tar.gz/Flask-Docker-0.2.0/README.rst | |Build Status| |Coverage Status| |PyPI Version| |Wheel Status|
Flask-Docker
============
Flask-Docker is an extension for Flask_ that integrates Docker_ client into
your application. It is based on the official client docker-py_.
.. _Flask: http://flask.pocoo.org
.. _Docker: https://www.docker.com
.. _docker-py: https://github.com/docker/docker-py#readme
The document is in ReadTheDocs_.
.. _ReadTheDocs: https://flask-docker.readthedocs.org
Installation
------------
::
pip install Flask-Docker
Issues
------
If you want to report bugs or request features, please create issues on
`GitHub Issues <https://github.com/tonyseek/flask-docker/issues>`_.
Contributes
-----------
You can send a pull request on
`GitHub <https://github.com/tonyseek/flask-docker/pulls>`_.
.. |Build Status| image:: https://img.shields.io/travis/tonyseek/flask-docker.svg?style=flat
:target: https://travis-ci.org/tonyseek/flask-docker
:alt: Build Status
.. |Coverage Status| image:: https://img.shields.io/coveralls/tonyseek/flask-docker.svg?style=flat
:target: https://coveralls.io/r/tonyseek/flask-docker
:alt: Coverage Status
.. |Wheel Status| image:: https://img.shields.io/pypi/wheel/Flask-Docker.svg?style=flat
:target: https://warehouse.python.org/project/Flask-Docker
:alt: Wheel Status
.. |PyPI Version| image:: https://img.shields.io/pypi/v/Flask-Docker.svg?style=flat
:target: https://pypi.python.org/pypi/Flask-Docker
:alt: PyPI Version
| PypiClean |
/KiMoPack-7.0.14.tar.gz/KiMoPack-7.0.14/docs/source/Introduction.rst | Introduction
=============
KiMoPack is a project for the handling of spectral data measured at
multiple time-points. The current design is optimised for the use with
optical transient absorption data, but it has been successfully adapted
for the use with transient x-ray emission and spectro-electro chemistry
data.
It focuses on the main tasks an experimenter faces:
loading and shaping of experiments, plotting of experiments, comparing of experiments,
analysing experiments with fast and/or advanced fitting routines, and saving/exporting/presenting
the results.
The software can be used on several different levels. The simplest level packs everything
into an object "TA" that contains all the parameters that are typically set.
These objects also contain the typical functions that are used in an analysis.
See :ref:`Main Tasks overview` for an overview of these functions.
All active functions have a capital letter in the beginning.
At the lower levels a series of convenience functions for the efficient plotting of
one or two dimensional data is provided. These are typical in the main module
For typical use, a series of Jupyter notebooks is provided that guides
through a number of different use scenarios and suggests the
parameters that are typically set.
In addition a series of tutorial notebooks are provided that guide the user through the different functions. These Tutorials can either be downloaded or executed on a "mybinder" server via this badge.
.. image:: https://mybinder.org/badge_logo.svg
:target: https://mybinder.org/v2/gh/erdzeichen/KiMoPack/HEAD
In addition a small series of videos were produced to introduce the features and usage of KiMoPack: https://www.youtube.com/channel/UCmhiK0P9wXXjs_PJaitx8BQ
| PypiClean |
/ChemListem-0.1.0.tar.gz/ChemListem-0.1.0/chemlistem/ensemblemodel.py | import sys
from collections import defaultdict
from datetime import datetime
from .tradmodel import TradModel, get_trad_model
from .minimodel import MiniModel, get_mini_model
# Module-level cache for the default ensemble (see get_ensemble_model).
defaultmodel = None
# Records whether the cached model was built with GPU (CuDNNLSTM) layers.
defaultmodelgpu = False
def get_ensemble_model(version="0.1.0", gpu=False):
    """
    Gets the default ensemble model - by getting the constituent models, downloading if necessary.

    Args:
        version: the version number on BitBucket.
        gpu: whether to use CuDNNLSTM.

    Returns:
        An EnsembleModel
    """
    global defaultmodel, defaultmodelgpu
    # Reuse the cached ensemble only if it was built for the same device type.
    if defaultmodel is not None and gpu == defaultmodelgpu:
        return defaultmodel
    defaultmodelgpu = gpu
    defaultmodel = EnsembleModel(get_trad_model(version, gpu),
                                 get_mini_model(version, gpu))
    return defaultmodel
class EnsembleModel(object):
    """
    A simple ensemble, consisting of a TradModel and a MiniModel.

    Both constituent models are run with a permissive threshold, their
    candidate entities are grouped by character span, and the scores for each
    span are averaged over the two models (a span found by only one model
    therefore keeps half its score).
    """

    def __init__(self, tradmodel, minimodel):
        """
        Set up the ensemble.

        Args:
            tradmodel: the TradModel
            minimodel: the MiniModel
        """
        self.tradmodel = tradmodel
        self.minimodel = minimodel
        print("Ensemble Model ready at", datetime.now(), file=sys.stderr)

    def _merge(self, ents_a, ents_b, textlen, threshold, domonly):
        """
        Merge two candidate entity lists into the final ensemble result.

        This is the logic that was previously duplicated verbatim in
        ``process`` and ``batchprocess``.

        Args:
            ents_a: entities from the first model.
            ents_b: entities from the second model.
            textlen: length of the analysed string (for dominance bookkeeping).
            threshold: minimum averaged score for an entity to be kept.
            domonly: if True, discard non-dominant entities.

        Returns:
            A sorted list of (start, end, string, score, is_dominant) tuples.
        """
        # Group candidates by (start, end) span so the two models' scores for
        # the same span can be averaged.
        pos_to_ents = defaultdict(list)
        for e in ents_a:
            pos_to_ents[(e[0], e[1])].append(e)
        for e in ents_b:
            pos_to_ents[(e[0], e[1])].append(e)
        nents = []
        for pos, ents in pos_to_ents.items():
            # Always divide by 2: a span seen by only one model is halved.
            score = sum(i[3] for i in ents) / 2
            if score >= threshold:
                nents.append([pos[0], pos[1], ents[0][2], score, False])
        # Mark dominance greedily from the highest-scoring entity down: an
        # entity is dominant iff it overlaps no higher-scoring entity.
        uu = [False] * textlen
        for e in sorted(nents, key=lambda x: -x[3]):
            if not any(uu[e[0]:e[1]]):
                for i in range(e[0], e[1]):
                    uu[i] = True
                e[4] = True
        if domonly:
            nents = [i for i in nents if i[4]]
        return sorted(tuple(i) for i in nents)

    def process(self, str, threshold=0.475, domonly=True):
        """
        Find named entities in a string.

        Entities are returned as tuples:

        (start_character_position, end_character_position, string, score, is_dominant)

        Entities are dominant if they are not partially or wholly overlapping with a higher-scoring entity.

        Args:
            str: the string to find entities in.  (Parameter name shadows the
                builtin; kept for backwards compatibility with keyword callers.)
            threshold: the minimum score for entities.
            domonly: if True, discard non-dominant entities.
        """
        # Run both models permissively; the real filtering happens after the
        # scores have been averaged in _merge.
        subthresh = threshold / 10
        r1 = self.tradmodel.process(str, subthresh, False)
        r2 = self.minimodel.process(str, subthresh, False)
        return self._merge(r1, r2, len(str), threshold, domonly)

    def batchprocess(self, instrs, threshold=0.475, domonly=True):
        """
        Find named entities in several strings at once.

        Args:
            instrs: the list of strings to find entities in.
            threshold: the minimum score for entities.
            domonly: if True, discard non-dominant entities.

        Returns:
            A list of entity lists, one per input string, in input order.
        """
        subthresh = threshold / 10
        rr1 = self.tradmodel.batchprocess(instrs, subthresh, False)
        rr2 = self.minimodel.batchprocess(instrs, subthresh, False)
        return [
            self._merge(rr1[n], rr2[n], len(instrs[n]), threshold, domonly)
            for n in range(len(rr1))
        ]
/Gritty-0.1.3.zip/Gritty-0.1.3/gritty/demos/game_of_life.py | import pygame
import itertools
from gritty.demos import basic_grid
# gritty demo
# Copyright 2013 Joe Cross
# This is free software, released under The GNU Lesser General Public License, version 3.
# You are free to use, distribute, and modify pyGrid. If modification is your game,
# it is recommended that you read the GNU LGPL license: http://www.gnu.org/licenses/
# Window caption doubles as the user help text.
caption = "Click to toggle, space to pause/resume"
grid, display, COLOR_OFF, COLOR_ON = basic_grid(caption)
paused = True
# Two flat boolean arrays (row-major) are double-buffered: one holds the
# generation being displayed, the other the generation being computed.
dimensions = grid.rows * grid.columns
active_grid, buffer_grid = [False] * dimensions, [False] * dimensions
# Map 2-D cell coordinates onto an index into the flat arrays.
index = lambda x, y: x + grid.columns * y
# Seed pattern: cells that start alive.
initial = [
    (11, 15),
    (12, 15),
    (12, 16),
    (16, 16),
    (17, 16),
    (18, 16),
    (17, 14),
]
def alive_to_color(value, cell):
    """Coercion hook: keep a cell's colour in sync with its aliveness.

    Returns the value unchanged so it can be stored as the attribute.
    """
    if value:
        cell.color = COLOR_ON
    else:
        cell.color = COLOR_OFF
    return value
# Register an "alive" attribute on every cell; the coercion hook keeps the
# cell colour in sync whenever the attribute is assigned.
grid.cell_attr['alive'] = False
grid.cell_attr_coercion_funcs['alive'] = alive_to_color
# Light up the seed pattern in both the model array and the visible grid.
for x, y in initial:
    grid[x, y].alive = active_grid[index(x, y)] = True
def wrap(val, L1, L2):
    """Wrap *val* into the half-open range [min(L1, L2), max(L1, L2)).

    The upper bound is exclusive (e.g. ``wrap(5, 0, 5) == 0``), which makes
    the result directly usable as an index into a sequence of length
    ``high - low``.  The previous docstring wrongly claimed an inclusive
    upper bound.  Raises ZeroDivisionError if L1 == L2 (as before).
    """
    low, high = min(L1, L2), max(L1, L2)
    return low + (val - low) % (high - low)
# All cell coordinates and the eight Moore-neighbourhood offsets, precomputed
# once.  NOTE: xrange means this script targets Python 2.
cells = list(itertools.product(xrange(grid.rows), xrange(grid.columns)))
offsets = list(itertools.product([-1, 0, 1], [-1, 0, 1]))
offsets.remove((0, 0))
def update_cell((x, y)):
    # Apply Conway's rules to one cell: read the current generation from
    # active_grid and write the next generation into buffer_grid.
    # (Python 2 tuple-parameter syntax.)
    # NOTE(review): the global declaration is redundant — only item
    # assignment happens here, not rebinding.
    global buffer_grid
    count = 0
    # Count live neighbours, wrapping toroidally at the board edges.
    for xo, yo in offsets:
        nx = wrap(x + xo, 0, grid.rows)
        ny = wrap(y + yo, 0, grid.columns)
        count += active_grid[nx + grid.columns * ny]
    alive = active_grid[index(x, y)]
    if alive:
        # Live cell survives only with 2 or 3 live neighbours.
        if count < 2 or count > 3:
            buffer_grid[index(x, y)] = False
        else:
            buffer_grid[index(x, y)] = True
    else:
        # Dead cell becomes alive with exactly 3 live neighbours.
        if count == 3:
            buffer_grid[index(x, y)] = True
        else:
            buffer_grid[index(x, y)] = False
def calculate_next_frame():
    """Compute the next generation for every cell into the buffer grid."""
    for cell in cells:
        update_cell(cell)
def flip_frames():
    """Swap the active and buffer grids, promoting the freshly computed
    generation to be the displayed one.

    Uses Python's tuple swap instead of the manual temp variable.
    """
    global buffer_grid, active_grid
    active_grid, buffer_grid = buffer_grid, active_grid
def apply_grid():
    """Push the active generation's aliveness into the visible grid cells."""
    for pos in cells:
        grid[pos].alive = active_grid[index(*pos)]
def draw_grid():
    # Blit the pre-rendered grid surface onto the display at the origin.
    display.get_surface().blit(grid.surface, (0, 0))
# Main event/render loop: window close or ESC quits, SPACE toggles the
# simulation, and a left click toggles a cell while paused.
while True:
    event = pygame.event.poll()
    if event.type == pygame.QUIT:
        break
    elif event.type == pygame.KEYDOWN:
        if event.key == pygame.K_ESCAPE:
            break
        elif event.key == pygame.K_SPACE:
            paused = not paused
    elif event.type == pygame.MOUSEBUTTONDOWN:
        if event.button == 1:  # Left button only
            # Cell editing is only allowed while the simulation is paused.
            if paused:
                x, y = grid.hit_check(pygame.mouse.get_pos())
                active_grid[index(x, y)] = not active_grid[index(x, y)]
                grid[x, y].alive = active_grid[index(x, y)]
    if not paused:
        # Advance one generation per frame while running.
        calculate_next_frame()
        flip_frames()
    # Rendering happens every frame so edits made while paused are visible.
    apply_grid()
    draw_grid()
    pygame.display.update()
pygame.quit()
define("dojox/charting/plot2d/Stacked", ["dojo/_base/lang", "dojo/_base/declare", "dojo/_base/array", "./Default", "./common",
	"dojox/lang/functional", "dojox/lang/functional/reversed", "dojox/lang/functional/sequence"],
	function(lang, declare, arr, Default, dc, df, dfr, dfs){
/*=====
var Default = dojox.charting.plot2d.Default;
=====*/
	// Functional one-liner that purges a series' graphics group.
	var purgeGroup = dfr.lambda("item.purgeGroup()");
	return declare("dojox.charting.plot2d.Stacked", Default, {
		// summary:
		//		Like the default plot, Stacked sets up lines, areas and markers
		//		in a stacked fashion (values on the y axis added to each other)
		//		as opposed to a direct one.
		getSeriesStats: function(){
			// summary:
			//		Calculate the min/max on all attached series in both directions.
			// returns: Object
			//		{hmin, hmax, vmin, vmax} min/max in both directions.
			var stats = dc.collectStackedStats(this.series);
			this._maxRunLength = stats.hmax;
			return stats;
		},
		render: function(dim, offsets){
			// summary:
			//		Run the calculations for any axes for this plot.
			// dim: Object
			//		An object in the form of { width, height }
			// offsets: Object
			//		An object of the form { l, r, t, b}.
			// returns: dojox.charting.plot2d.Stacked
			//		A reference to this plot for functional chaining.
			if(this._maxRunLength <= 0){
				return this;
			}
			// stack all values:  acc[j] starts as the column total over every
			// series, so the first series drawn (the last one, see below) sits
			// on top of the full stack.
			var acc = df.repeat(this._maxRunLength, "-> 0", 0);
			for(var i = 0; i < this.series.length; ++i){
				var run = this.series[i];
				for(var j = 0; j < run.data.length; ++j){
					var v = run.data[j];
					if(v !== null){
						if(isNaN(v)){ v = 0; }
						acc[j] += v;
					}
				}
			}
			// draw runs in backwards
			if(this.zoom && !this.isDataDirty()){
				return this.performZoom(dim, offsets);
			}
			this.resetEvents();
			this.dirty = this.isDirty();
			if(this.dirty){
				arr.forEach(this.series, purgeGroup);
				this._eventSeries = {};
				this.cleanGroup();
				var s = this.group;
				df.forEachRev(this.series, function(item){ item.cleanGroup(s); });
			}
			var t = this.chart.theme, events = this.events(),
				ht = this._hScaler.scaler.getTransformerFromModel(this._hScaler),
				vt = this._vScaler.scaler.getTransformerFromModel(this._vScaler);
			// Iterate the series back-to-front: each pass paints the current
			// running total, then subtracts its own values from acc so the
			// next (earlier) series paints underneath.
			for(var i = this.series.length - 1; i >= 0; --i){
				var run = this.series[i];
				if(!this.dirty && !run.dirty){
					t.skip();
					this._reconnectEvents(run.name);
					continue;
				}
				run.cleanGroup();
				var theme = t.next(this.opt.areas ? "area" : "line", [this.opt, run], true),
					s = run.group, outline,
					lpoly = arr.map(acc, function(v, i){
						return {
							x: ht(i + 1) + offsets.l,
							y: dim.height - offsets.b - vt(v)
						};
					}, this);
				var lpath = this.opt.tension ? dc.curve(lpoly, this.opt.tension) : "";
				if(this.opt.areas){
					var apoly = lang.clone(lpoly);
					if(this.opt.tension){
						// Close the curved outline down to the x-axis to form
						// the filled area.
						var p=dc.curve(apoly, this.opt.tension);
						p += " L" + lpoly[lpoly.length - 1].x + "," + (dim.height - offsets.b) +
							" L" + lpoly[0].x + "," + (dim.height - offsets.b) +
							" L" + lpoly[0].x + "," + lpoly[0].y;
						run.dyn.fill = s.createPath(p).setFill(theme.series.fill).getFill();
					} else {
						apoly.push({x: lpoly[lpoly.length - 1].x, y: dim.height - offsets.b});
						apoly.push({x: lpoly[0].x, y: dim.height - offsets.b});
						apoly.push(lpoly[0]);
						run.dyn.fill = s.createPolyline(apoly).setFill(theme.series.fill).getFill();
					}
				}
				if(this.opt.lines || this.opt.markers){
					if(theme.series.outline){
						outline = dc.makeStroke(theme.series.outline);
						outline.width = 2 * outline.width + theme.series.stroke.width;
					}
				}
				if(this.opt.markers){
					run.dyn.marker = theme.symbol;
				}
				var frontMarkers, outlineMarkers, shadowMarkers;
				if(theme.series.shadow && theme.series.stroke){
					var shadow = theme.series.shadow,
						spoly = arr.map(lpoly, function(c){
							return {x: c.x + shadow.dx, y: c.y + shadow.dy};
						});
					if(this.opt.lines){
						if(this.opt.tension){
							run.dyn.shadow = s.createPath(dc.curve(spoly, this.opt.tension)).setStroke(shadow).getStroke();
						} else {
							run.dyn.shadow = s.createPolyline(spoly).setStroke(shadow).getStroke();
						}
					}
					if(this.opt.markers){
						shadow = theme.marker.shadow;
						shadowMarkers = arr.map(spoly, function(c){
							return s.createPath("M" + c.x + " " + c.y + " " + theme.symbol).
								setStroke(shadow).setFill(shadow.color);
						}, this);
					}
				}
				if(this.opt.lines){
					if(outline){
						if(this.opt.tension){
							run.dyn.outline = s.createPath(lpath).setStroke(outline).getStroke();
						} else {
							run.dyn.outline = s.createPolyline(lpoly).setStroke(outline).getStroke();
						}
					}
					if(this.opt.tension){
						run.dyn.stroke = s.createPath(lpath).setStroke(theme.series.stroke).getStroke();
					} else {
						run.dyn.stroke = s.createPolyline(lpoly).setStroke(theme.series.stroke).getStroke();
					}
				}
				if(this.opt.markers){
					frontMarkers = new Array(lpoly.length);
					outlineMarkers = new Array(lpoly.length);
					outline = null;
					if(theme.marker.outline){
						outline = dc.makeStroke(theme.marker.outline);
						outline.width = 2 * outline.width + (theme.marker.stroke ? theme.marker.stroke.width : 0);
					}
					arr.forEach(lpoly, function(c, i){
						var path = "M" + c.x + " " + c.y + " " + theme.symbol;
						if(outline){
							outlineMarkers[i] = s.createPath(path).setStroke(outline);
						}
						frontMarkers[i] = s.createPath(path).setStroke(theme.marker.stroke).setFill(theme.marker.fill);
					}, this);
					if(events){
						var eventSeries = new Array(frontMarkers.length);
						arr.forEach(frontMarkers, function(s, i){
							var o = {
								element: "marker",
								index: i,
								run: run,
								shape: s,
								outline: outlineMarkers[i] || null,
								shadow: shadowMarkers && shadowMarkers[i] || null,
								cx: lpoly[i].x,
								cy: lpoly[i].y,
								x: i + 1,
								y: run.data[i]
							};
							this._connectEvents(o);
							eventSeries[i] = o;
						}, this);
						this._eventSeries[run.name] = eventSeries;
					}else{
						delete this._eventSeries[run.name];
					}
				}
				run.dirty = false;
				// update the accumulator: peel this series off the running
				// total so the next pass draws the remaining stack below it.
				for(var j = 0; j < run.data.length; ++j){
					var v = run.data[j];
					if(v !== null){
						if(isNaN(v)){ v = 0; }
						acc[j] -= v;
					}
				}
			}
			this.dirty = false;
			return this; // dojox.charting.plot2d.Stacked
		}
	});
});
/OctoPrint-1.9.2.tar.gz/OctoPrint-1.9.2/src/octoprint/vendor/sockjs/tornado/transports/htmlfile.py | from __future__ import absolute_import, division, print_function, unicode_literals
"""
sockjs.tornado.transports.htmlfile
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
HtmlFile transport implementation.
"""
import re
from octoprint.vendor.sockjs.tornado import proto
from octoprint.vendor.sockjs.tornado.transports import streamingbase
from octoprint.vendor.sockjs.tornado.util import no_auto_finish
try:
# noinspection PyCompatibility
from html import escape
except:
# noinspection PyDeprecation
from cgi import escape
# Strips every non-word character (and underscores) from the client-supplied
# callback name before it is interpolated into the template below.
RE = re.compile(r'[\W_]+')

# HTMLFILE template
HTMLFILE_HEAD = r'''
<!doctype html>
<html><head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
</head><body><h2>Don't panic!</h2>
<script>
document.domain = document.domain;
var c = parent.%s;
c.start();
function p(d) {c.message(d);};
window.onload = function() {c.stop();};
</script>
'''.strip()
# Pad the prelude past 1 KiB so browsers (notably old IE) start interpreting
# the streamed document immediately instead of buffering it.
HTMLFILE_HEAD += ' ' * (1024 - len(HTMLFILE_HEAD) + 14)
HTMLFILE_HEAD += '\r\n\r\n'
class HtmlFileTransport(streamingbase.StreamingTransportBase):
    """SockJS "htmlfile" streaming transport.

    Streams messages to the browser as successive ``<script>`` chunks inside
    a never-ending HTML document loaded in a hidden iframe.
    """
    name = 'htmlfile'

    def initialize(self, server):
        # No transport-specific state; kept to mirror the other transports.
        super(HtmlFileTransport, self).initialize(server)

    @no_auto_finish
    def get(self, session_id):
        # Handle the long-lived GET request that carries the stream.
        # Start response
        self.preflight()
        self.handle_session_cookie()
        self.disable_cache()
        self.set_header('Content-Type', 'text/html; charset=UTF-8')
        # Grab callback parameter
        callback = self.get_argument('c', None)
        if not callback:
            self.write('"callback" parameter required')
            self.set_status(500)
            self.finish()
            return
        # TODO: Fix me - use parameter
        # RE strips all non-word characters from the callback before it is
        # escaped and interpolated into the document prelude.
        self.write(HTMLFILE_HEAD % escape(RE.sub('', callback)))
        self.flush()
        # Now try to attach to session
        if not self._attach_session(session_id):
            self.finish()
            return
        # Flush any pending messages
        if self.session:
            self.session.flush()

    def send_pack(self, message, binary=False):
        # Send one frame to the client as a <script>p(...)</script> chunk.
        if binary:
            raise Exception('binary not supported for HtmlFileTransport')
        # TODO: Just do escaping
        msg = '<script>\np(%s);\n</script>\r\n' % proto.json_encode(message)
        self.active = False
        try:
            # NOTE(review): stats count the raw payload length, not the full
            # chunk written below — confirm that is intentional.
            self.notify_sent(len(message))
            self.write(msg)
            self.flush().add_done_callback(self.send_complete)
        except IOError:
            # If connection dropped, make sure we close offending session instead
            # of propagating error all way up.
            self.session.delayed_close()
            self._detach()
/MeaningCloud-python-2.0.0.tar.gz/MeaningCloud-python-2.0.0/meaningcloud/Request.py | import requests
import sys
if sys.version_info.major < 3:
from urllib import urlencode
elif sys.version_info.major == 3:
from urllib.parse import urlencode
class Request:
    """Helper that builds and sends requests to the MeaningCloud API.

    Fixes over the previous revision:

    * ``_params`` and ``_file`` were class-level mutable attributes, so every
      ``Request`` instance (and the class itself) shared a single dict; they
      are now created per instance in ``__init__``.
    * ``extraHeaders`` was checked with ``extraHeaders is dict`` (always
      ``False``) and merged with ``headers = headers.update(...)`` (which
      rebinds ``headers`` to ``None``); both are fixed so custom headers are
      actually sent.
    """

    # Default request timeout in seconds (instance-level override via
    # setTimeout()).
    _timeout = 60
    _url = ""
    _key = ""

    CONTENT_TYPE_TXT = 'txt'
    CONTENT_TYPE_URL = 'url'
    CONTENT_TYPE_FILE = 'doc'

    def __init__(self, url, key):
        """
        Request constructor

        :param url:
            URL of the API against which the request will be made
        :param key:
            License key
        :raises ValueError: if url or key is empty
        """
        if not url or not key:
            raise ValueError("URL and key cannot be empty")
        self._url = url
        self._key = key
        # Per-instance state; previously these were shared class attributes.
        self._params = {}
        self._file = {}
        self.addParam('key', key)

    def addParam(self, paramName, paramValue):
        """
        Add a parameter to the request

        :param paramName:
            Name of the parameter
        :param paramValue:
            Value of the parameter
        :raises ValueError: if paramName is empty
        """
        if not paramName:
            raise ValueError('paramName cannot be empty')
        self._params[paramName] = paramValue

    def setContent(self, type_, value):
        """
        Sets the content that's going to be sent to analyze according to its type

        :param type_:
            Type of the content (text, file or url); unknown types are ignored
        :param value:
            Value of the content (for files, a path opened in binary mode)
        """
        if type_ in [self.CONTENT_TYPE_TXT, self.CONTENT_TYPE_URL,
                     self.CONTENT_TYPE_FILE]:
            if type_ == self.CONTENT_TYPE_FILE:
                # NOTE: the handle stays open until the object is collected;
                # callers sending many files may want to manage it themselves.
                self._file = {'doc': open(value, 'rb')}
            else:
                self.addParam(type_, value)

    def setContentTxt(self, txt):
        """
        Sets a text content to send to the API

        :param txt:
            Text to be sent to the API
        """
        self.setContent(self.CONTENT_TYPE_TXT, txt)

    def setContentUrl(self, url):
        """
        Sets a URL content to send to the API

        :param url:
            URL to be analyzed by the API
        """
        self.setContent(self.CONTENT_TYPE_URL, url)

    def setContentFile(self, file):
        """
        Sets a File content to send to the API.

        :param file:
            File to be sent to the API
        """
        self.setContent(self.CONTENT_TYPE_FILE, file)

    def sendRequest(self, extraHeaders=""):
        """
        Sends a request to the URL specified and returns the response

        :param extraHeaders:
            Optional dict of additional headers for the request; non-dict
            values (including the default empty string) are ignored
        :return:
            requests.Response object (encoding forced to UTF-8)
        """
        if 'src' not in self._params:
            self.addParam('src', 'mc-python')
        url = self._url
        if 'doc' in self._file:
            # Multipart upload: let requests build the body and Content-Type.
            # (No timeout here, matching the previous behaviour.)
            headers = {}
            if isinstance(extraHeaders, dict):
                headers.update(extraHeaders)
            result = requests.post(url=url, data=self._params,
                                   files=self._file, headers=headers)
        else:
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            if isinstance(extraHeaders, dict):
                headers.update(extraHeaders)
            result = requests.request("POST", url=url,
                                      data=urlencode(self._params),
                                      headers=headers, timeout=self._timeout)
        result.encoding = 'utf-8'
        return result

    # Getters and Setters
    def getUrl(self):
        """
        Get the url of the request

        :return:
            String with the url
        """
        return self._url

    def setUrl(self, url):
        """
        Set a new URL

        :param url:
            New URL
        """
        self._url = url

    def getParams(self):
        """
        Get the params attribute

        :return:
            params attribute
        """
        return self._params

    def getTimeout(self):
        """
        Get the timeout value

        :return:
            timeout value
        """
        return self._timeout

    def setTimeout(self, timeout):
        """
        Set a new timeout value

        :param timeout:
            New timeout
        """
        self._timeout = timeout

    def getFile(self):
        """
        Get the file attribute

        :return:
            file attribute
        """
        return self._file
/Kadabra-0.5.0.tar.gz/Kadabra-0.5.0/kadabra/channels.py | from .metrics import Metrics
import logging, json
class RedisChannel(object):
    """A channel for transporting metrics using Redis.

    :type host: string
    :param host: The host of the Redis server.

    :type port: int
    :param port: The port of the Redis server.

    :type db: int
    :param db: The database to use on the Redis server. This should be used
               exclusively for Kadabra to prevent collisions with keys that
               might be used by your application.

    :type logger: string
    :param logger: The name of the logger to use.

    :type queue_key: string
    :param queue_key: Redis key of the list that acts as the pending queue.

    :type inprogress_key: string
    :param inprogress_key: Redis key of the list holding metrics that have
                           been received but not yet acknowledged.
    """
    #: Default arguments for the Redis channel. These will be used by the
    #: client and agent to initialize this channel if custom configuration
    #: values are not provided.
    DEFAULT_ARGS = {
        "host": "localhost",
        "port": 6379,
        "db": 0,
        "logger": "kadabra.channel",
        "queue_key": "kadabra_queue",
        "inprogress_key": "kadabra_inprogress"
    }

    def __init__(self, host, port, db, logger, queue_key, inprogress_key):
        # Imported lazily so the redis package is only required when this
        # channel is actually used.
        from redis import StrictRedis
        self.client = StrictRedis(host=host, port=port, db=db)
        self.logger = logging.getLogger(logger)
        self.queue_key = queue_key
        self.inprogress_key = inprogress_key

    def send(self, metrics):
        """Send metrics to a Redis list, which will act as queue for pending
        metrics to be received and published.

        :type metrics: ~kadabra.Metrics
        :param metrics: The metrics to be sent.
        """
        to_push = metrics.serialize()
        self.logger.debug("Sending %s" % to_push)
        self.client.lpush(self.queue_key, json.dumps(to_push))
        self.logger.debug("Successfully sent %s" % to_push)

    def receive(self):
        """Receive metrics from the queue so they can be published. Once
        received, the metrics will be moved into a temporary "in progress"
        queue until they have been acknowledged as published (by calling
        :meth:`~kadabra.channels.RedisChannel.complete`). This method will
        block until there are metrics available on the queue or after 10
        seconds.

        :rtype: ~kadabra.Metrics
        :returns: The metrics to be published, or None if there were no metrics
                  received after the timeout.
        """
        self.logger.debug("Receiving metrics")
        # brpoplpush atomically pops from the queue and pushes onto the
        # in-progress list, so no metrics are lost if this process dies.
        raw = self.client.brpoplpush(self.queue_key, self.inprogress_key,
                                     timeout=10)
        if raw:
            rv = json.loads(raw)
            self.logger.debug("Got metrics: %s" % rv)
            return Metrics.deserialize(rv)
        self.logger.debug("No metrics received")
        return None

    def receive_batch(self, max_batch_size):
        """Receive a list of metrics from the queue so they can be published.
        Once received, all metrics will be moved into a temporary "in progress"
        queue until they have been acknowledged as published (by calling
        :meth:`~kadabra.channels.RedisChannel.complete`). The number of metrics
        that are received is less than or equal to the ``max_batch_size``, and
        possibly empty.

        :type max_batch_size: int
        :param max_batch_size: The maximum number of metrics to receive in the
                               batch.

        :rtype: list
        :returns: The list of metrics to be published. The size of the list is
                  less than or equal to the ``max_batch_size``, and possibly
                  empty if there are no metrics in the queue.
        """
        self.logger.debug("Receiving batch of metrics")
        # A pipeline issues all pops in one round trip; pops beyond the end
        # of the queue yield None and are filtered out below.
        pipeline = self.client.pipeline()
        for i in range(max_batch_size):
            pipeline.rpoplpush(self.queue_key, self.inprogress_key)
        return [Metrics.deserialize(json.loads(m)) for m in pipeline.execute()
                if m is not None]

    def complete(self, metrics):
        """Mark a list of metrics as completed by removing them from the
        in-progress queue.

        :type metrics: list
        :param metrics: The list of :class:`~kadabra.Metrics` to mark as
                        complete.
        """
        if len(metrics) > 0:
            pipeline = self.client.pipeline()
            for m in metrics:
                # lrem matches by exact byte value, so the serialized form
                # must be identical to what receive()/receive_batch() moved
                # into the in-progress list.
                pipeline.lrem(self.inprogress_key, 1,
                              json.dumps(m.serialize()))
            pipeline.execute()

    def in_progress(self, query_limit):
        """Return a list of the metrics that are in_progress.

        :type query_limit: int
        :param query_limit: The maximum number of items to get from the in
                            progress queue.

        :rtype: list
        :returns: A list of :class:`Metric`\s that are in progress.
        """
        in_progress = self.client.lrange(self.inprogress_key, 0,
                                         query_limit - 1)
        self.logger.debug("Found %s in progress metrics" % len(in_progress))
        return [Metrics.deserialize(json.loads(m))
                for m in in_progress]
/MindustryCompiler-2.1-py3-none-any.whl/compiler/yacc/grammar/InstrASM/op.py | from .._start import grammar, YaccProduction
from .. import asmError as err
from ...classes import AsmInst, Variable, KeyWord
# NOTE: the docstring below is the PLY/yacc grammar production for this rule
# and must not be edited casually.
@grammar
def opResultTwoArgs(p: YaccProduction):
    '''ligne : op opTwoArgs ID instrArgs EndLine'''
    # Binary sub-instruction: exactly two operands must follow the result
    # variable.
    args = p[4]
    if len(args) != 2:
        raise err.tooManyArgs(p, 2, len(args))
    p[0] = AsmInst(KeyWord(p[1]), [p[2], Variable(p[3]), *args])
# NOTE: the docstring below is the PLY/yacc grammar production for this rule.
@grammar
def opResultOneArgs(p: YaccProduction):
    '''ligne : op opOneArgs ID instrArgs EndLine'''
    # Unary sub-instruction: exactly one operand must follow the result
    # variable.
    args = p[4]
    if len(args) != 1:
        raise err.tooManyArgs(p, 1, len(args))
    p[0] = AsmInst(KeyWord(p[1]), [p[2], Variable(p[3]), *args])
# Error production: "op" followed by an unrecognised sub-instruction keyword.
@grammar
def opKeyword_error(p: YaccProduction):
    '''ligne : op error'''
    raise err.invalideSubInstr(p)
# Error production: the result slot after the sub-instruction is not an ID
# (must be a variable name).
@grammar
def opTwoArgsResult_error(p: YaccProduction):
    '''ligne : op opTwoArgs error
             | op opOneArgs error'''
    raise err.mustBeVar(p, 3)
# Error production: malformed operand list for a two-operand sub-instruction.
@grammar
def opTwoArgsArgs_error(p: YaccProduction):
    '''ligne : op opTwoArgs ID instrArgs error'''
    raise err.maybeNotEnoughtArgs(p, 2)
# Error production: malformed operand list for a one-operand sub-instruction.
@grammar
def opOneArgsArgs_error(p: YaccProduction):
    '''ligne : op opOneArgs ID instrArgs error'''
    raise err.maybeNotEnoughtArgs(p, 1)
# All sub-instruction keywords that take two operands.  The docstring is the
# PLY/yacc grammar production (one alternative per keyword token).
@grammar
def opTwoArgs(p: YaccProduction):
    '''opTwoArgs : add
                 | sub
                 | mul
                 | div
                 | idiv
                 | mod
                 | pow
                 | equal
                 | notEqual
                 | land
                 | lessThan
                 | lessThanEq
                 | greaterThan
                 | strictEqual
                 | shl
                 | shr
                 | or
                 | and
                 | xor
                 | not
                 | max
                 | min
                 | angle
                 | len
                 | noise'''
    p[0] = KeyWord(p[1])
# All sub-instruction keywords that take a single operand.  The docstring is
# the PLY/yacc grammar production (one alternative per keyword token).
@grammar
def opOneArgs(p: YaccProduction):
    '''opOneArgs : abs
                 | log
                 | log10
                 | sin
                 | cos
                 | tan
                 | floor
                 | ceil
                 | sqrt
                 | rand
    '''
    p[0] = KeyWord(p[1])
/MozPhab-1.4.3-py3-none-any.whl/mozphab/repository.py |
import json
import os
import urllib.parse
from typing import (
List,
Optional,
)
from mozphab import environment
from .conduit import conduit, normalise_reviewer
from .exceptions import Error
from .helpers import (
get_arcrc_path,
has_arc_rejections,
read_json_field,
)
from .logger import logger
from .spinner import wait_message
# Domain suffixes owned by Mozilla; used to decide whether a Phabricator
# instance is Mozilla's.
MOZILLA_DOMAINS = {
    ".mozilla.org",
    ".mozilla.com",
    ".allizom.org",
}


def is_mozilla_phabricator(url):
    """Return `True` if the `url` is a Mozilla owned domain."""
    host = urllib.parse.urlparse(url).hostname
    if not host:
        # No parseable hostname (e.g. not a URL at all).
        return False
    for domain in MOZILLA_DOMAINS:
        if host.endswith(domain):
            return True
    return False
class Repository(object):
def __init__(self, path, dot_path, phab_url=None):
self._phid = None
self._phab_repo = None
self._phab_vcs = None
self.vcs = None
self.path = path # base repository directory
self.dot_path = dot_path # .hg/.git directory
self._arcconfig_files = [
os.path.join(self.dot_path, ".arcconfig"),
os.path.join(self.path, ".arcconfig"),
]
self.args = None
self.phab_url = (phab_url or self._phab_url()).rstrip("/")
self.api_url = self._api_url()
self.call_sign = self._get_setting("repository.callsign")
self.bmo_url = self._get_setting("bmo_url")
if self.bmo_url:
if not (
urllib.parse.urlparse(self.bmo_url).scheme == "https"
or environment.HTTP_ALLOWED
):
raise Error("Only https connections are allowed.")
elif is_mozilla_phabricator(self.phab_url):
self.bmo_url = "https://bugzilla.mozilla.org"
def is_worktree_clean(self):
"""Check if the working tree is clean."""
def before_submit(self):
"""Executed before the submit commit."""
def after_submit(self):
"""Executed after the submit commit."""
def _get_setting(self, key):
"""Read settings from .arcconfig"""
value = read_json_field(self._arcconfig_files, [key])
return value
def _phab_url(self):
"""Determine the phab/conduit URL."""
# In order of priority as per arc
# FIXME: This should also check {.hg|.git}/arc/config, which is where
# `arc set-config --local` writes to. See bug 1497786.
defaults_files = [get_arcrc_path()]
if environment.IS_WINDOWS:
defaults_files.append(
os.path.join(
os.getenv("ProgramData", ""), "Phabricator", "Arcanist", "config"
)
)
else:
defaults_files.append("/etc/arcconfig")
phab_url = (
self._get_setting("phabricator.uri")
or self._get_setting("conduit_uri")
or read_json_field(defaults_files, ["config", "default"])
)
if not phab_url:
raise Error("Failed to determine Phabricator URL (missing .arcconfig?)")
return phab_url
def cleanup(self):
"""Perform any repo-related cleanup tasks.
May be called multiple times.
If an exception is raised this is NOT called (to avoid dataloss)."""
def finalize(self, commits):
"""Update the history after node changed."""
def set_args(self, args):
if (
hasattr(args, "single")
and args.single
and args.end_rev != environment.DEFAULT_END_REV
):
raise Error("Option --single can be used with only one identifier.")
self.args = args
def untracked(self):
"""Return a list of untracked files."""
def commit_stack(self, **kwargs):
"""Return list of commits.
List of dicts with the following keys:
name human readable identifier of commit (eg. short sha)
node SHA1 in stack
orig-node an original SHA1 of the commit
title first line of commit description (unaltered)
body commit description, excluding first line
title-preview title with bug-id and reviewer modifications
bug-id bmo bug-id
bug-id-orig original bug-id from commit desc
reviewers list of reviewers
rev-id phabricator revision id
parent SHA1 of the parent commit
author-date string representation of the commit creation time
author-date-epoch
author-name
author-email
"""
def refresh_commit_stack(self, commits):
"""Update the stack following an altering change (eg rebase)."""
def is_node(self, node):
"""Check if node exists.
Returns a Boolean.
"""
def check_node(self, node):
"""Check if node exists.
Returns a node if found.
Raises NotFoundError if node not found in the repository.
"""
def checkout(self, node):
"""Checkout/Update to specified node."""
def commit(self, body):
"""Commit the changes in the working directory."""
def amend_commit(self, commit, commits):
"""Amend commit description from `title` and `desc` fields"""
def is_descendant(self, node: str) -> bool:
"""Return `True` if the repository revset is descendant from `node`."""
def map_callsign_to_unified_head(self, callsign: str) -> Optional[str]:
"""Return the expected VCS identifier for the given callsign.
Returns a VCS identifier that corresponds to the given Phabricator repository
callsign. Confirms the identified head exists in the repository.
"""
def uplift_commits(self, dest: str, commits: List[dict]) -> List[dict]:
"""Uplift the repo's revset onto `dest` and returns the refreshed `commits`."""
def rebase_commit(self, source_commit, dest_commit):
"""Rebase source onto destination."""
def before_patch(self, node, name):
"""Prepare repository to apply the patches."""
def apply_patch(self, diff, body, author, author_date):
"""Apply the patch and commit the changes."""
def format_patch(self, diff, body, author, author_date):
"""Format a patch appropriate for importing."""
def check_commits_for_submit(self, commits, *, require_bug=True):
"""Validate the list of commits (from commit_stack) are ok to submit"""
errors = []
warnings = []
# Extract a set of reviewers and verify first; they will be displayed
# with other commit errors.
all_reviewers = {}
reviewer_commit_map = {}
commit_invalid_reviewers = {}
rev_ids_to_names = dict()
for commit in commits:
commit_invalid_reviewers[commit["node"]] = []
if not commit["rev-id"]:
continue
names = rev_ids_to_names.setdefault(commit["rev-id"], [])
names.append(commit["name"])
for rev_id, names in rev_ids_to_names.items():
if len(names) < 2:
continue
error_msg = (
"Phabricator revisions should be unique, but the following "
"commits refer to the same one (D{}):\n".format(rev_id)
)
for name in names:
error_msg += "* %s\n" % name
errors.append(error_msg)
# Flatten and deduplicate reviewer list, keeping track of the
# associated commit
for commit in commits:
# We can ignore reviewers on WIP commits, as they won't be passed to Phab
if commit["wip"]:
continue
for group in list(commit["reviewers"].keys()):
for reviewer in commit["reviewers"][group]:
all_reviewers.setdefault(group, set())
all_reviewers[group].add(reviewer)
reviewer = normalise_reviewer(reviewer)
reviewer_commit_map.setdefault(reviewer, [])
reviewer_commit_map[reviewer].append(commit["node"])
# Verify all reviewers in a single call
for invalid_reviewer in conduit.check_for_invalid_reviewers(all_reviewers):
for node in reviewer_commit_map[
normalise_reviewer(invalid_reviewer["name"])
]:
commit_invalid_reviewers[node].append(invalid_reviewer)
unavailable_reviewers_warning = False
for commit in commits:
commit_errors = []
commit_warnings = []
if require_bug and not commit["bug-id"]:
commit_errors.append("missing bug-id")
if has_arc_rejections(commit["body"]):
commit_errors.append("contains arc fields")
if commit["rev-id"]:
revisions = conduit.get_revisions(ids=[int(commit["rev-id"])])
if len(revisions) == 0:
commit_errors.append(
"Phabricator did not return a query result for revision D%s"
" (it might be inaccessible or not exist at all)"
% commit["rev-id"]
)
# commit_issues identified below this are commit_errors unless
# self.args.force is True, which makes them commit_warnings
commit_issues = (
commit_warnings if self.args and self.args.force else commit_errors
)
for reviewer in commit_invalid_reviewers[commit["node"]]:
if "disabled" in reviewer:
commit_errors.append("User %s is disabled" % reviewer["name"])
elif "until" in reviewer:
unavailable_reviewers_warning = True
msg = "%s is not available until %s" % (
reviewer["name"],
reviewer["until"],
)
commit_issues.append(msg)
else:
commit_errors.append(
"%s is not a valid reviewer's name" % reviewer["name"]
)
if commit_errors:
errors.append(
"%s %s\n- %s"
% (commit["name"], commit["title"], "\n- ".join(commit_errors))
)
if commit_warnings:
warnings.append(
"%s %s\n- %s"
% (commit["name"], commit["title"], "\n- ".join(commit_warnings))
)
if errors:
raise Error("\n\n".join(errors))
if warnings:
logger.warning("\n\n".join(warnings))
if unavailable_reviewers_warning:
logger.warning("Notice: reviewer availability overridden.")
def _api_url(self):
"""Return a base URL for conduit API call"""
url = urllib.parse.urljoin(self.phab_url, "api/")
if not (
urllib.parse.urlparse(url).scheme == "https" or environment.HTTP_ALLOWED
):
raise Error("Only https connections are allowed.")
return url
@property
def phab_repo(self):
"""Representation of the Repository in Phabricator API."""
if not self._phab_repo:
with wait_message("Reading repository data"):
self._phab_repo = conduit.get_repository_by_callsign(self.call_sign)
return self._phab_repo
@property
def phid(self):
"""PHID of the repository.
This value does not change over time.
It is stored in a file to avoid calling the API on every run.
"""
if not self._phid:
path = os.path.join(self.dot_path, ".moz-phab_phid")
if os.path.isfile(path):
with open(path) as f:
try:
repo_phids = json.load(f)
except json.decoder.JSONDecodeError:
# File is probably using the old format.
repo_phids = {}
repo_phid = repo_phids.get(self.call_sign, None)
else:
repo_phids = {}
repo_phid = None
if not repo_phid:
repo_phid = self.phab_repo["phid"]
repo_phids[self.call_sign] = repo_phid
with open(path, "w") as f:
json.dump(repo_phids, f)
self._phid = repo_phid
return self._phid
def check_vcs(self):
    """`Git.check_vcs` raises if cinnabar required and not installed.

    Returns True when the local VCS matches the one Phabricator reports,
    or when the check is bypassed with ``--force-vcs``.
    """
    if self.args.force_vcs:
        return True

    local, remote = self.vcs, self.phab_vcs
    if local != remote:
        # This error is captured in Git and not raised if Cinnabar installed.
        raise Error(
            "Local VCS ({local}) is different from the one defined in the "
            "repository ({remote}).".format(local=local, remote=remote)
        )
    return True
@property
def phab_vcs(self):
    """Version Control System short name stored in Phabricator.

    This value does not change over time, so it is cached in a plain-text
    file (``.moz-phab_vcs`` under ``dot_path``) to avoid calling the API
    on every run.
    """
    if self._phab_vcs:
        return self._phab_vcs

    cache_path = os.path.join(self.dot_path, ".moz-phab_vcs")
    if os.path.isfile(cache_path):
        # Cached on disk by a previous run.
        with open(cache_path) as cache_file:
            self._phab_vcs = cache_file.readline()
    else:
        self._phab_vcs = self.phab_repo["fields"]["vcs"]
        with open(cache_path, "w") as cache_file:
            cache_file.write(self._phab_vcs)
    return self._phab_vcs
def get_public_node(self, node):
    """Return the identifier of *node* as known to the remote VCS.

    Base implementation is the identity function — presumably overridden
    by subclasses that translate between local and remote hashes (e.g.
    cinnabar-backed Git repos); confirm against the subclass definitions.
    """
    return node
def validate_email(self):
"""Validate a user's configured email address.""" | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/dense_heads/anchor_free_head.py | import warnings
from abc import abstractmethod
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import force_fp32
from mmdet.core import build_bbox_coder, multi_apply
from mmdet.core.anchor.point_generator import MlvlPointGenerator
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
    """Anchor-free head (FCOS, Fovea, RepPoints, etc.).

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels. Used in child classes.
        stacked_convs (int): Number of stacking convs of the head.
        strides (tuple): Downsample factor of each feature map.
        dcn_on_last_conv (bool): If true, use dcn in the last layer of
            towers. Default: False.
        conv_bias (bool | str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
            None, otherwise False. Default: "auto".
        loss_cls (dict): Config of classification loss.
        loss_bbox (dict): Config of localization loss.
        bbox_coder (dict): Config of bbox coder. Defaults
            'DistancePointBBoxCoder'.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """  # noqa: W605

    _version = 1

    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 strides=(4, 8, 16, 32, 64),
                 dcn_on_last_conv=False,
                 conv_bias='auto',
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 bbox_coder=dict(type='DistancePointBBoxCoder'),
                 conv_cfg=None,
                 norm_cfg=None,
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='conv_cls',
                         std=0.01,
                         bias_prob=0.01))):
        super(AnchorFreeHead, self).__init__(init_cfg)
        self.num_classes = num_classes
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        if self.use_sigmoid_cls:
            self.cls_out_channels = num_classes
        else:
            # Softmax-style losses need an extra channel for background.
            self.cls_out_channels = num_classes + 1
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.dcn_on_last_conv = dcn_on_last_conv
        assert conv_bias == 'auto' or isinstance(conv_bias, bool)
        self.conv_bias = conv_bias
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.bbox_coder = build_bbox_coder(bbox_coder)

        self.prior_generator = MlvlPointGenerator(strides)

        # In order to keep a more general interface and be consistent with
        # anchor_head. We can think of point like one anchor
        self.num_base_priors = self.prior_generator.num_base_priors[0]

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self._init_layers()

    def _init_layers(self):
        """Initialize layers of the head."""
        self._init_cls_convs()
        self._init_reg_convs()
        self._init_predictor()

    def _init_cls_convs(self):
        """Initialize classification conv layers of the head."""
        self.cls_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias))

    def _init_reg_convs(self):
        """Initialize bbox regression conv layers of the head."""
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias))

    def _init_predictor(self):
        """Initialize predictor layers of the head."""
        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Hack some keys of the model state dict so that can load checkpoints
        of previous version."""
        version = local_metadata.get('version', None)
        if version is None:
            # the key is different in early versions
            # for example, 'fcos_cls' become 'conv_cls' now
            bbox_head_keys = [
                k for k in state_dict.keys() if k.startswith(prefix)
            ]
            ori_predictor_keys = []
            new_predictor_keys = []
            # e.g. 'fcos_cls' or 'fcos_reg'
            for key in bbox_head_keys:
                ori_predictor_keys.append(key)
                key = key.split('.')
                conv_name = None
                if key[1].endswith('cls'):
                    conv_name = 'conv_cls'
                elif key[1].endswith('reg'):
                    conv_name = 'conv_reg'
                elif key[1].endswith('centerness'):
                    conv_name = 'conv_centerness'
                else:
                    # Fix: this branch used to be `assert NotImplementedError`,
                    # which is a no-op (asserting a class object is always
                    # truthy), so unknown keys were skipped silently. Keep the
                    # skip behaviour but make it visible to the user.
                    warnings.warn(
                        'Unrecognized key "{}" in a legacy checkpoint; it is '
                        'kept under its original name.'.format('.'.join(key)))
                if conv_name is not None:
                    key[1] = conv_name
                    new_predictor_keys.append('.'.join(key))
                else:
                    # Unknown key: drop it from the rename list so it is left
                    # untouched in the state dict.
                    ori_predictor_keys.pop(-1)
            for i in range(len(new_predictor_keys)):
                state_dict[new_predictor_keys[i]] = state_dict.pop(
                    ori_predictor_keys[i])
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually contain classification scores and bbox predictions.
                cls_scores (list[Tensor]): Box scores for each scale level,
                    each is a 4D-tensor, the channel number is
                    num_points * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for each scale
                    level, each is a 4D-tensor, the channel number is
                    num_points * 4.
        """
        # forward_single also returns intermediate features; keep only the
        # scores and bbox predictions here.
        return multi_apply(self.forward_single, feats)[:2]

    def forward_single(self, x):
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.

        Returns:
            tuple: Scores for each class, bbox predictions, features
                after classification and regression conv layers, some
                models needs these features like FCOS.
        """
        cls_feat = x
        reg_feat = x

        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)
        cls_score = self.conv_cls(cls_feat)

        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)
        bbox_pred = self.conv_reg(reg_feat)
        return cls_score, bbox_pred, cls_feat, reg_feat

    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
        """
        raise NotImplementedError

    @abstractmethod
    def get_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute regression, classification and centerness targets for points
        in multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
        """
        raise NotImplementedError

    def _get_points_single(self,
                           featmap_size,
                           stride,
                           dtype,
                           device,
                           flatten=False):
        """Get points of a single scale level.

        This function will be deprecated soon.
        """
        # Fix: the concatenated message used to read "...generator nowyou can
        # get..." because of a missing separator between the string literals.
        warnings.warn(
            '`_get_points_single` in `AnchorFreeHead` will be '
            'deprecated soon, we support a multi level point generator now. '
            'You can get points of a single level feature map '
            'with `self.prior_generator.single_level_grid_priors` ')

        h, w = featmap_size
        # First create Range with the default dtype, than convert to
        # target `dtype` for onnx exporting.
        x_range = torch.arange(w, device=device).to(dtype)
        y_range = torch.arange(h, device=device).to(dtype)
        y, x = torch.meshgrid(y_range, x_range)
        if flatten:
            y = y.flatten()
            x = x.flatten()
        return y, x

    def get_points(self, featmap_sizes, dtype, device, flatten=False):
        """Get points according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            dtype (torch.dtype): Type of points.
            device (torch.device): Device of points.
            flatten (bool): Whether to flatten each level's point grid.
                Default: False.

        Returns:
            tuple: points of each image.
        """
        # Fix: same missing-separator problem as in `_get_points_single`.
        warnings.warn(
            '`get_points` in `AnchorFreeHead` will be '
            'deprecated soon, we support a multi level point generator now. '
            'You can get points of all levels '
            'with `self.prior_generator.grid_priors` ')

        mlvl_points = []
        for i in range(len(featmap_sizes)):
            mlvl_points.append(
                self._get_points_single(featmap_sizes[i], self.strides[i],
                                        dtype, device, flatten))
        return mlvl_points

    def aug_test(self, feats, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            feats (list[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains features for all images in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. each dict has image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[ndarray]: bbox results of each class
        """
        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
/Gxsphinx-markdown-tables-1.0.8.tar.gz/Gxsphinx-markdown-tables-1.0.8/sphinx_markdown_tables/__init__.py | import re
from sphinx_markdown_tables import __version__
def get_str_len(in_str):
    """Return the display width of *in_str*.

    CJK Unified Ideographs (U+4E00..U+9FFF) and halfwidth/fullwidth forms
    (U+FF00..U+FFEF) count as two columns; every other character counts
    as one.
    """
    return sum(
        2 if ('\u4e00' <= ch <= '\u9fff' or '\uff00' <= ch <= '\uffef') else 1
        for ch in in_str)
tex = ''
def create_tex_body(data):
global tex
tex += data[0] + '\n'
for i in data[1:]:
tex += '&\n' + i + '\n'
tex += '\\\\%\n' + '\\hline\n'
# >>>>>>>>>>>>>>>>>>>
def create_tex_resize_tail():
global tex
tex += '\\end{tabulary}\n' + \
'}\n' + \
'\\par\n' + \
'\\end{table*}\n' + \
'\\FloatBarrier\n' + \
'\\sphinxattableend\\end{savenotes}\n'
def create_tex_resize_title(align, data):
global tex
tex += '\\begin{savenotes}\\sphinxattablestart\n' + \
'\\FloatBarrier\n'+ \
'\\begin{table*}\n' + \
'\\tiny\n' + \
'\\rowcolors{2}{gray!20}{white}\n' + \
'\\centering\n' + \
'\\resizebox{\\textwidth}{!}{\n' + \
'\\begin{tabulary}{\\linewidth}[t]{|'
for i in align:
tex += i + '|'
tex += 'l|l|}\n\\hline\n'
tex += '\\sphinxstyletheadfamily\n' + data[0] + '\n'
for i in data[1:]:
tex += '&\\sphinxstyletheadfamily\n' + i + '\n'
tex += '\\\\%\n' + '\\hline\n'
def create_tex_resize_table(data):
    """Render a complete resizebox table: data[0] is the alignment row,
    data[1] the header cells, the rest are body rows."""
    create_tex_resize_title(data[0], data[1])
    for row in data[2:]:
        create_tex_body(row)
    create_tex_resize_tail()
# >>>>>>>>>>>>>>>>>>>
def create_tex_long_tail():
global tex
tex += '\\end{longtable}\n' + \
'\\par\n' + \
'\\sphinxattableend\\end{savenotes}\n'
def create_tex_long_title(align, data):
global tex
tex += '\\begin{savenotes}\\sphinxattablestart\n' + \
'\\rowcolors{2}{gray!20}{white}\n' + \
'\\centering\n' + \
'\\begin{longtable}[c]{|'
for i in align:
tex += i + '|'
tex += 'l|l|}\n\\hline\n'
tex += '\\sphinxstyletheadfamily\n' + data[0] + '\n'
for i in data[1:]:
tex += '&\\sphinxstyletheadfamily\n' + i + '\n'
tex += '\\\\%\n' + '\\hline\n'
def create_tex_long_table(data):
    """Render a complete longtable: data[0] is the alignment row, data[1]
    the header cells, the rest are body rows."""
    create_tex_long_title(data[0], data[1])
    for row in data[2:]:
        create_tex_body(row)
    create_tex_long_tail()
# >>>>>>>>>>>>>>>>>>>
def create_tex_tail():
global tex
tex += '\\end{tabulary}\n' + \
'\\par\n' + \
'\\end{table*}\n' + \
'\\sphinxattableend\\end{savenotes}\n'
def create_tex_title(align, data):
global tex
tex += '\\begin{savenotes}\\sphinxattablestart\n' + \
'\\begin{table*}\n' + \
'\\rowcolors{2}{gray!20}{white}\n' + \
'\\centering\n' + \
'\\begin{tabulary}{\\linewidth}[t]{|'
for i in align:
tex += i + '|'
tex += '}\n\\hline\n'
tex += '\\sphinxstyletheadfamily\n' + data[0] + '\n'
for i in data[1:]:
tex += '&\\sphinxstyletheadfamily\n' + i + '\n'
tex += '\\\\%\n' + '\\hline\n'
def create_tex_table(data):
    """Render a complete plain tabulary table: data[0] is the alignment row,
    data[1] the header cells, the rest are body rows."""
    create_tex_title(data[0], data[1])
    for row in data[2:]:
        create_tex_body(row)
    create_tex_tail()
# >>>>>>>>>>>>>>>>>>
def create_tex_x_tail():
global tex
tex += '\\end{tabularx}\n' + \
'\\par\n' + \
'\\end{table*}\n' + \
'\\FloatBarrier\n' + \
'\\sphinxattableend\\end{savenotes}\n'
def create_tex_x_title(align, data):
global tex
tex += '\\begin{savenotes}\\sphinxattablestart\n' + \
'\\FloatBarrier\n' + \
'\\begin{table*}\n' + \
'\\rowcolors{1}{gray!20}{white}\n' + \
'\\centering\n' + \
'\\begin{tabularx}{\\textwidth}[t]{|'
for i in align:
if i == 'c':
tex += 'X<{\\centering\\arraybackslash}|'
elif i == 'l':
tex += 'X<{\\raggedright\\arraybackslash}|'
elif i == 'r':
tex += 'X<{\\raggedleft\\arraybackslash}|'
tex += 'l|l|}\n\\hline\n'
tex += '\\sphinxstyletheadfamily\n' + data[0] + '\n'
for i in data[1:]:
tex += '&\\sphinxstyletheadfamily\n' + i + '\n'
tex += '\\\\%\n' + '\\hline\n'
def create_tex_x_table(data):
    """Render a complete tabularx table: data[0] is the alignment row,
    data[1] the header cells, the rest are body rows."""
    create_tex_x_title(data[0], data[1])
    for row in data[2:]:
        create_tex_body(row)
    create_tex_x_tail()
# >>>>>>>>>>>>>>>>>>
def create_tex_des_title(data):
global tex
tex += '\\begin{savenotes}\\sphinxattablestart\n' + \
'\\FloatBarrier\n' + \
'\\begin{table*}[htbp]\n' + \
'\\rowcolors{1}{gray!20}{white}\n' + \
'\\centering\n' + \
'\\begin{tabularx}{\\textwidth}[t]{|c|l|c|c|'
tex += 'X<{\\raggedright\\arraybackslash}|'
tex += 'l|l|}\n\\hline\n'
tex += '\\sphinxstyletheadfamily\n' + data[0] + '\n'
for i in data[1:]:
tex += '&\\sphinxstyletheadfamily\n' + i + '\n'
tex += '\\\\%\n' + '\\hline\n'
def create_tex_des_table(data):
    """Render the fixed-layout description table; the alignment row
    (data[0]) is ignored because the column spec is hard-coded."""
    create_tex_des_title(data[1])
    for row in data[2:]:
        create_tex_body(row)
    create_tex_x_tail()
# >>>>>>>>>>>>>>>>>>
def setup(app):
    """Sphinx extension entry point: hook table conversion into the
    ``source-read`` event and report extension metadata."""
    app.connect('source-read', process_tables)
    metadata = {
        'version': __version__,
        'parallel_read_safe': True,
    }
    return metadata
def process_tables(app, docname, source):
    """Replace markdown table blocks in *source* with generated LaTeX.

    Sphinx calls this for every document via the ``source-read`` event.
    ``source`` is a 1-item list; element 0 must be replaced in place for
    the change to persist.  Table blocks are detected with
    python-markdown's TableProcessor, parsed by ``get_table_msg`` and
    rendered with one of the ``create_tex_*_table`` helpers:

    - 40+ parsed rows (alignment + header rows included): breakable
      ``longtable``
    - 4+ columns: shrunk via ``\\resizebox`` — unless the header matches
      the known 5-column register-description form, which gets the fixed
      ``|c|l|c|c|X|`` layout
    - otherwise: full-width ``tabularx``
    """
    global tex
    import markdown
    md = markdown.Markdown(extensions=['markdown.extensions.tables'])
    table_processor = markdown.extensions.tables.TableProcessor(md.parser)
    raw_markdown = source[0]
    # Tables are delimited by blank lines; keep the separators (captured
    # group) so the document can be reassembled verbatim.
    blocks = re.split(r'(\n{2,})', raw_markdown)
    for i, block in enumerate(blocks):
        if table_processor.test(None, block):
            data = get_table_msg(block)
            if len(data) >= 40:
                create_tex_long_table(data)
            elif len(data[0]) >=4:
                # NOTE(review): this exact header marks a register
                # description table ("bit field / name / attr / default /
                # description") — presumably project-specific; confirm.
                if len(data[0]) == 5 and data[1] == ['位域','变量名', '属性', '默认值', '描述']:
                    create_tex_des_table(data)
                else:
                    create_tex_resize_table(data)
            else:
                create_tex_x_table(data)
            # The helpers accumulated into the module-global `tex`; consume
            # and reset it for the next table.
            blocks[i] = tex
            tex = ''
            #html = md.convert(block)
            #styled = html.replace('<table>', '<table border="1" class="docutils">', 1) # apply styling
            #blocks[i] = styled
    # re-assemble into markdown-with-tables-replaced
    # must replace element 0 for changes to persist
    source[0] = ''.join(blocks)
def get_table_msg(block):
    """Parse a markdown table block into a list of rows.

    Returns ``[alignment, header, *body]`` where *alignment* is a list of
    'c'/'l'/'r' per column (derived from the ``:--`` / ``--:`` separator
    row, defaulting to 'c'), and the remaining entries are the raw cell
    strings.  Literal '&' characters are replaced by '§' so they survive
    later LaTeX generation.
    """
    data = []
    header_seen = False
    for raw_line in block.strip('\n').split('\n'):
        line = raw_line.replace('&', '§')
        if ':-' in line or '-:' in line or '--' in line:
            # Alignment separator row: update the defaults set below.
            cells = line.strip(' \n').strip('|').split('|')
            for idx, cell in enumerate(cells):
                left = ':-' in cell
                right = '-:' in cell
                if left and not right:
                    data[0][idx] = 'l'
                elif right and not left:
                    data[0][idx] = 'r'
            continue
        cells = line.strip(' \n').strip('|').split('|')
        if header_seen:
            data.append(cells)
        else:
            # First content line is the header; seed alignment with 'c'.
            header_seen = True
            data.append(['c'] * len(cells))
            data.append(cells)
    return data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.